/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.client;

import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;

import java.util.Optional;
import java.util.Set;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseClassTestRule;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.master.MasterFileSystem;
import org.apache.hadoop.hbase.testclassification.ClientTests;
import org.apache.hadoop.hbase.testclassification.LargeTests;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.CommonFSUtils;
import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
import org.apache.hadoop.hbase.util.FSTableDescriptors;
import org.junit.Assert;
import org.junit.ClassRule;
import org.junit.Test;
import org.junit.experimental.categories.Category;
import org.junit.runner.RunWith;
import org.junit.runners.Parameterized;

/**
 * Class to test asynchronous table admin operations.
 * @see TestAsyncTableAdminApi This test and that one used to be a single test; together they were
 *      taking longer than our ten minute timeout, so they were split.
 */
@RunWith(Parameterized.class)
@Category({ LargeTests.class, ClientTests.class })
public class TestAsyncTableAdminApi2 extends TestAsyncAdminBase {

  @ClassRule
  public static final HBaseClassTestRule CLASS_RULE =
    HBaseClassTestRule.forClass(TestAsyncTableAdminApi2.class);

  @Test
  public void testDisableCatalogTable() throws Exception {
    try {
      this.admin.disableTable(TableName.META_TABLE_NAME).join();
      fail("Expected to throw ConstraintException");
    } catch (Exception e) {
      // Expected.
    }
    // Before the fix for HBASE-6146, the table creation below would fail because the hbase:meta
    // table was actually getting disabled by the disableTable() call.
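    // Creating a user table here doubles as a check that hbase:meta is still enabled.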
    createTableWithDefaultConf(tableName);
  }

  @Test
  public void testAddColumnFamily() throws Exception {
    // Create a table with one family
    TableDescriptorBuilder builder = TableDescriptorBuilder.newBuilder(tableName);
    builder.setColumnFamily(ColumnFamilyDescriptorBuilder.of(FAMILY_0));
    admin.createTable(builder.build()).join();
    admin.disableTable(tableName).join();
    // Verify the table descriptor
    verifyTableDescriptor(tableName, FAMILY_0);

    // Modify the table adding another family and verify the descriptor
    admin.addColumnFamily(tableName, ColumnFamilyDescriptorBuilder.of(FAMILY_1)).join();
    verifyTableDescriptor(tableName, FAMILY_0, FAMILY_1);
  }

  @Test
  public void testAddSameColumnFamilyTwice() throws Exception {
    // Create a table with one family
    TableDescriptorBuilder builder = TableDescriptorBuilder.newBuilder(tableName);
    builder.setColumnFamily(ColumnFamilyDescriptorBuilder.of(FAMILY_0));
    admin.createTable(builder.build()).join();
    admin.disableTable(tableName).join();
    // Verify the table descriptor
    verifyTableDescriptor(tableName, FAMILY_0);

    // Modify the table adding another family and verify the descriptor
    admin.addColumnFamily(tableName, ColumnFamilyDescriptorBuilder.of(FAMILY_1)).join();
    verifyTableDescriptor(tableName, FAMILY_0, FAMILY_1);

    try {
      // Add the same column family again - expect failure
      this.admin.addColumnFamily(tableName, ColumnFamilyDescriptorBuilder.of(FAMILY_1)).join();
      Assert.fail("Adding an existing column family should fail");
    } catch (Exception e) {
      // Expected.
    }
  }

  @Test
  public void testModifyColumnFamily() throws Exception {
    TableDescriptorBuilder tdBuilder = TableDescriptorBuilder.newBuilder(tableName);
    ColumnFamilyDescriptor cfd = ColumnFamilyDescriptorBuilder.of(FAMILY_0);
    int blockSize = cfd.getBlocksize();
    admin.createTable(tdBuilder.setColumnFamily(cfd).build()).join();
    admin.disableTable(tableName).join();
    // Verify the table descriptor
    verifyTableDescriptor(tableName, FAMILY_0);

    int newBlockSize = 2 * blockSize;
    cfd = ColumnFamilyDescriptorBuilder.newBuilder(FAMILY_0).setBlocksize(newBlockSize).build();
    // Modify the column family
    admin.modifyColumnFamily(tableName, cfd).join();

    TableDescriptor htd = admin.getDescriptor(tableName).get();
    ColumnFamilyDescriptor hcfd = htd.getColumnFamily(FAMILY_0);
    assertEquals(newBlockSize, hcfd.getBlocksize());
  }

  @Test
  public void testModifyNonExistingColumnFamily() throws Exception {
    TableDescriptorBuilder tdBuilder = TableDescriptorBuilder.newBuilder(tableName);
    ColumnFamilyDescriptor cfd = ColumnFamilyDescriptorBuilder.of(FAMILY_0);
    int blockSize = cfd.getBlocksize();
    admin.createTable(tdBuilder.setColumnFamily(cfd).build()).join();
    admin.disableTable(tableName).join();
    // Verify the table descriptor
    verifyTableDescriptor(tableName, FAMILY_0);

    int newBlockSize = 2 * blockSize;
    cfd = ColumnFamilyDescriptorBuilder.newBuilder(FAMILY_1).setBlocksize(newBlockSize).build();

    // Modify a column family that is not in the table.
    try {
      admin.modifyColumnFamily(tableName, cfd).join();
      Assert.fail("Modifying a non-existent column family should fail");
    } catch (Exception e) {
      // Expected.
    }
  }

  @Test
  public void testDeleteColumnFamily() throws Exception {
    // Create a table with two families
    TableDescriptorBuilder builder = TableDescriptorBuilder.newBuilder(tableName);
    builder.setColumnFamily(ColumnFamilyDescriptorBuilder.of(FAMILY_0))
      .setColumnFamily(ColumnFamilyDescriptorBuilder.of(FAMILY_1));
    admin.createTable(builder.build()).join();
    admin.disableTable(tableName).join();
    // Verify the table descriptor
    verifyTableDescriptor(tableName, FAMILY_0, FAMILY_1);

    // Modify the table removing one family and verify the descriptor
    admin.deleteColumnFamily(tableName, FAMILY_1).join();
    verifyTableDescriptor(tableName, FAMILY_0);
  }

  @Test
  public void testDeleteSameColumnFamilyTwice() throws Exception {
    // Create a table with two families
    TableDescriptorBuilder builder = TableDescriptorBuilder.newBuilder(tableName);
    builder.setColumnFamily(ColumnFamilyDescriptorBuilder.of(FAMILY_0))
      .setColumnFamily(ColumnFamilyDescriptorBuilder.of(FAMILY_1));
    admin.createTable(builder.build()).join();
    admin.disableTable(tableName).join();
    // Verify the table descriptor
    verifyTableDescriptor(tableName, FAMILY_0, FAMILY_1);

    // Modify the table removing one family and verify the descriptor
    admin.deleteColumnFamily(tableName, FAMILY_1).join();
    verifyTableDescriptor(tableName, FAMILY_0);

    try {
      // Delete again - expect failure
      admin.deleteColumnFamily(tableName, FAMILY_1).join();
      Assert.fail("Deleting a non-existent column family should fail");
    } catch (Exception e) {
      // Expected.
    }
  }

  private void verifyTableDescriptor(final TableName tableName, final byte[]... families)
    throws Exception {
    // Verify descriptor from master
    TableDescriptor htd = admin.getDescriptor(tableName).get();
    verifyTableDescriptor(htd, tableName, families);

    // Verify descriptor from HDFS
    MasterFileSystem mfs = TEST_UTIL.getMiniHBaseCluster().getMaster().getMasterFileSystem();
    Path tableDir = CommonFSUtils.getTableDir(mfs.getRootDir(), tableName);
    TableDescriptor td = FSTableDescriptors.getTableDescriptorFromFs(mfs.getFileSystem(), tableDir);
    verifyTableDescriptor(td, tableName, families);
  }

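  // Checks a single descriptor: the table name must match and the descriptor must contain
  // exactly the expected column families.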
  private void verifyTableDescriptor(final TableDescriptor htd, final TableName tableName,
    final byte[]... families) {
    Set<byte[]> htdFamilies = htd.getColumnFamilyNames();
    assertEquals(tableName, htd.getTableName());
    assertEquals(families.length, htdFamilies.size());
    for (byte[] familyName : families) {
      assertTrue("Expected family " + Bytes.toString(familyName), htdFamilies.contains(familyName));
    }
  }

  @Test
  public void testTableAvailableWithRandomSplitKeys() throws Exception {
    createTableWithDefaultConf(tableName);
    byte[][] splitKeys = new byte[][] { new byte[] { 1, 1, 1 }, new byte[] { 2, 2, 2 } };
    boolean tableAvailable = admin.isTableAvailable(tableName, splitKeys).get();
    assertFalse("Table should be created with 1 row in META", tableAvailable);
  }

  @Test
  public void testCompactionTimestamps() throws Exception {
    createTableWithDefaultConf(tableName);
    AsyncTable<?> table = ASYNC_CONN.getTable(tableName);
    Optional<Long> ts = admin.getLastMajorCompactionTimestamp(tableName).get();
    assertFalse(ts.isPresent());
    Put p = new Put(Bytes.toBytes("row1"));
    p.addColumn(FAMILY, Bytes.toBytes("q"), Bytes.toBytes("v"));
    table.put(p).join();
    ts = admin.getLastMajorCompactionTimestamp(tableName).get();
    // no files written -> no data
    assertFalse(ts.isPresent());

    admin.flush(tableName).join();
    ts = admin.getLastMajorCompactionTimestamp(tableName).get();
    // still not present; we flushed a file, but no major compaction happened
    assertFalse(ts.isPresent());

    byte[] regionName = ASYNC_CONN.getRegionLocator(tableName)
      .getRegionLocation(Bytes.toBytes("row1")).get().getRegion().getRegionName();
    Optional<Long> ts1 = admin.getLastMajorCompactionTimestampForRegion(regionName).get();
    assertFalse(ts1.isPresent());
    p = new Put(Bytes.toBytes("row2"));
    p.addColumn(FAMILY, Bytes.toBytes("q"), Bytes.toBytes("v"));
    table.put(p).join();
    admin.flush(tableName).join();
    ts1 = admin.getLastMajorCompactionTimestamp(tableName).get();
    // make sure the timestamp is still absent, as the old file is still around
    assertFalse(ts1.isPresent());

    for (int i = 0; i < 3; i++) {
      table.put(p).join();
      admin.flush(tableName).join();
    }
    admin.majorCompact(tableName).join();
    long curt = EnvironmentEdgeManager.currentTime();
    long waitTime = 10000;
    long endt = curt + waitTime;
    CompactionState state = admin.getCompactionState(tableName).get();
    LOG.info("Current compaction state 1 is " + state);
    // Wait for the major compaction request to be picked up
    while (state == CompactionState.NONE && curt < endt) {
      Thread.sleep(100);
      state = admin.getCompactionState(tableName).get();
      curt = EnvironmentEdgeManager.currentTime();
      LOG.info("Current compaction state 2 is " + state);
    }
    // Now, should have the right compaction state, let's wait until the compaction is done
    if (state == CompactionState.MAJOR) {
      state = admin.getCompactionState(tableName).get();
      LOG.info("Current compaction state 3 is " + state);
      while (state != CompactionState.NONE && curt < endt) {
        Thread.sleep(10);
        state = admin.getCompactionState(tableName).get();
        LOG.info("Current compaction state 4 is " + state);
      }
    }
    // Sleep to wait for the region server report
    Thread
      .sleep(TEST_UTIL.getConfiguration().getInt("hbase.regionserver.msginterval", 3 * 1000) * 2);

    ts = admin.getLastMajorCompactionTimestamp(tableName).get();
    // after a compaction our earliest timestamp will have progressed forward
    assertTrue(ts.isPresent());
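    // and the reported value should be a real (positive) wall clock timestamp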
    assertTrue(ts.get() > 0);
    // region api still the same
    ts1 = admin.getLastMajorCompactionTimestampForRegion(regionName).get();
    assertTrue(ts1.isPresent());
    assertEquals(ts.get(), ts1.get());
    table.put(p).join();
    admin.flush(tableName).join();
    ts = admin.getLastMajorCompactionTimestamp(tableName).join();
    // a flush by itself does not change the last major compaction timestamp
    assertTrue(ts.isPresent());
    assertEquals(ts.get(), ts1.get());
    ts1 = admin.getLastMajorCompactionTimestampForRegion(regionName).get();
    assertTrue(ts1.isPresent());
    assertEquals(ts.get(), ts1.get());
  }
}