/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.client;

import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;

import java.io.IOException;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicInteger;
import org.apache.hadoop.hbase.HBaseClassTestRule;
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HRegionLocation;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.InvalidFamilyOperationException;
import org.apache.hadoop.hbase.MetaTableAccessor;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.TableNotDisabledException;
import org.apache.hadoop.hbase.TableNotEnabledException;
import org.apache.hadoop.hbase.TableNotFoundException;
import org.apache.hadoop.hbase.exceptions.MergeRegionException;
import org.apache.hadoop.hbase.master.LoadBalancer;
import org.apache.hadoop.hbase.regionserver.DisabledRegionSplitPolicy;
import org.apache.hadoop.hbase.regionserver.HRegion;
import org.apache.hadoop.hbase.regionserver.HStore;
import org.apache.hadoop.hbase.regionserver.HStoreFile;
import org.apache.hadoop.hbase.testclassification.ClientTests;
import org.apache.hadoop.hbase.testclassification.LargeTests;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.FSUtils;
import org.apache.hadoop.hbase.util.Pair;
import org.apache.hadoop.hbase.util.Threads;
import org.junit.After;
import org.junit.AfterClass;
import org.junit.Before;
import org.junit.BeforeClass;
import org.junit.ClassRule;
import org.junit.Rule;
import org.junit.Test;
import org.junit.experimental.categories.Category;
import org.junit.rules.TestName;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import org.apache.hadoop.hbase.shaded.protobuf.RequestConverter;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MergeTableRegionsRequest;

/**
 * Class to test HBaseAdmin.
 * Spins up the minicluster once at test start and then takes it down afterward.
 * Add any testing of HBaseAdmin functionality here.
 */
@Category({LargeTests.class, ClientTests.class})
public class TestAdmin1 {

  @ClassRule
  public static final HBaseClassTestRule CLASS_RULE =
      HBaseClassTestRule.forClass(TestAdmin1.class);

  private static final Logger LOG = LoggerFactory.getLogger(TestAdmin1.class);
  private final static HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
  private Admin admin;

  @Rule
  public TestName name = new TestName();

  @BeforeClass
  public static void setUpBeforeClass() throws Exception {
    TEST_UTIL.getConfiguration().setInt("hbase.regionserver.msginterval", 100);
    TEST_UTIL.getConfiguration().setInt("hbase.client.pause", 250);
    TEST_UTIL.getConfiguration().setInt("hbase.client.retries.number", 6);
    TEST_UTIL.getConfiguration().setBoolean("hbase.master.enabletable.roundrobin", true);
    TEST_UTIL.startMiniCluster(3);
  }

  @AfterClass
  public static void tearDownAfterClass() throws Exception {
    TEST_UTIL.shutdownMiniCluster();
  }

  @Before
  public void setUp() throws Exception {
    this.admin = TEST_UTIL.getAdmin();
  }

  @After
  public void tearDown() throws Exception {
    for (HTableDescriptor htd : this.admin.listTables()) {
      TEST_UTIL.deleteTable(htd.getTableName());
    }
  }

  @Test
  public void testSplitFlushCompactUnknownTable() throws InterruptedException {
    final TableName unknowntable = TableName.valueOf(name.getMethodName());
    Exception exception = null;
    try {
      this.admin.compact(unknowntable);
    } catch (IOException e) {
      exception = e;
    }
    assertTrue(exception instanceof TableNotFoundException);

    exception = null;
    try {
      this.admin.flush(unknowntable);
    } catch (IOException e) {
      exception = e;
    }
    assertTrue(exception instanceof TableNotFoundException);

    exception = null;
    try {
      this.admin.split(unknowntable);
    } catch (IOException e) {
      exception = e;
    }
    assertTrue(exception instanceof TableNotFoundException);
  }

  @Test
  public void testDeleteEditUnknownColumnFamilyAndOrTable() throws IOException {
    // Test we get exception if we try to
    final TableName nonexistentTable = TableName.valueOf("nonexistent");
    final byte[] nonexistentColumn = Bytes.toBytes("nonexistent");
    HColumnDescriptor nonexistentHcd = new HColumnDescriptor(nonexistentColumn);
    Exception exception = null;
    try {
      this.admin.addColumnFamily(nonexistentTable, nonexistentHcd);
    } catch (IOException e) {
      exception = e;
    }
    assertTrue(exception instanceof TableNotFoundException);

    exception = null;
    try {
      this.admin.deleteTable(nonexistentTable);
    } catch (IOException e) {
      exception = e;
    }
    assertTrue(exception instanceof TableNotFoundException);

    exception = null;
    try {
      this.admin.deleteColumnFamily(nonexistentTable, nonexistentColumn);
    } catch (IOException e) {
      exception = e;
    }
    assertTrue(exception instanceof TableNotFoundException);

    exception = null;
    try {
      this.admin.disableTable(nonexistentTable);
    } catch (IOException e) {
      exception = e;
    }
    assertTrue(exception instanceof TableNotFoundException);

    exception = null;
    try {
      this.admin.enableTable(nonexistentTable);
    } catch (IOException e) {
      exception = e;
    }
    assertTrue(exception instanceof TableNotFoundException);

    exception = null;
    try {
      this.admin.modifyColumnFamily(nonexistentTable,
        nonexistentHcd);
    } catch (IOException e) {
      exception = e;
    }
    assertTrue(exception instanceof TableNotFoundException);

    exception = null;
    try {
      HTableDescriptor htd = new HTableDescriptor(nonexistentTable);
      htd.addFamily(new HColumnDescriptor(HConstants.CATALOG_FAMILY));
      this.admin.modifyTable(htd.getTableName(), htd);
    } catch (IOException e) {
      exception = e;
    }
    assertTrue(exception instanceof TableNotFoundException);

    // Now make it so at least the table exists and then do tests against a
    // nonexistent column family -- see if we get right exceptions.
    final TableName tableName = TableName.valueOf(name.getMethodName() + System.currentTimeMillis());
    HTableDescriptor htd = new HTableDescriptor(tableName);
    htd.addFamily(new HColumnDescriptor("cf"));
    this.admin.createTable(htd);
    try {
      exception = null;
      try {
        this.admin.deleteColumnFamily(htd.getTableName(), nonexistentHcd.getName());
      } catch (IOException e) {
        exception = e;
      }
      assertTrue("found=" + exception.getClass().getName(),
          exception instanceof InvalidFamilyOperationException);

      exception = null;
      try {
        this.admin.modifyColumnFamily(htd.getTableName(), nonexistentHcd);
      } catch (IOException e) {
        exception = e;
      }
      assertTrue("found=" + exception.getClass().getName(),
          exception instanceof InvalidFamilyOperationException);
    } finally {
      this.admin.disableTable(tableName);
      this.admin.deleteTable(tableName);
    }
  }

  @Test
  public void testDisableAndEnableTable() throws IOException {
    final byte [] row = Bytes.toBytes("row");
    final byte [] qualifier = Bytes.toBytes("qualifier");
    final byte [] value = Bytes.toBytes("value");
    final TableName table = TableName.valueOf(name.getMethodName());
    Table ht = TEST_UTIL.createTable(table, HConstants.CATALOG_FAMILY);
    Put put = new Put(row);
    put.addColumn(HConstants.CATALOG_FAMILY, qualifier, value);
    ht.put(put);
    Get get = new Get(row);
    get.addColumn(HConstants.CATALOG_FAMILY, qualifier);
    ht.get(get);

    this.admin.disableTable(ht.getName());
    assertTrue("Table must be disabled.", TEST_UTIL.getHBaseCluster()
        .getMaster().getTableStateManager().isTableState(
            ht.getName(), TableState.State.DISABLED));
    assertEquals(TableState.State.DISABLED, getStateFromMeta(table));

    // Test that table is disabled
    get = new Get(row);
    get.addColumn(HConstants.CATALOG_FAMILY, qualifier);
    boolean ok = false;
    try {
      ht.get(get);
    } catch (TableNotEnabledException e) {
      ok = true;
    }
    ok = false;
    // verify that scan encounters correct exception
    Scan scan = new Scan();
    try {
      ResultScanner scanner = ht.getScanner(scan);
      Result res = null;
      do {
        res = scanner.next();
      } while (res != null);
    } catch (TableNotEnabledException e) {
      ok = true;
    }
    assertTrue(ok);
    this.admin.enableTable(table);
    assertTrue("Table must be enabled.", TEST_UTIL.getHBaseCluster()
        .getMaster().getTableStateManager().isTableState(
            ht.getName(), TableState.State.ENABLED));
    assertEquals(TableState.State.ENABLED, getStateFromMeta(table));

    // Test that table is enabled
    try {
      ht.get(get);
    } catch (RetriesExhaustedException e) {
      ok = false;
    }
    assertTrue(ok);
    ht.close();
  }

  private TableState.State getStateFromMeta(TableName table) throws IOException {
    TableState state =
        MetaTableAccessor.getTableState(TEST_UTIL.getConnection(), table);
    assertNotNull(state);
    return state.getState();
  }

  @Test
  public void testDisableAndEnableTables() throws IOException {
    final byte [] row = Bytes.toBytes("row");
    final byte [] qualifier = Bytes.toBytes("qualifier");
    final byte [] value = Bytes.toBytes("value");
    final TableName table1 = TableName.valueOf(name.getMethodName() + "1");
    final TableName table2 = TableName.valueOf(name.getMethodName() + "2");
    Table ht1 = TEST_UTIL.createTable(table1, HConstants.CATALOG_FAMILY);
    Table ht2 = TEST_UTIL.createTable(table2, HConstants.CATALOG_FAMILY);
    Put put = new Put(row);
    put.addColumn(HConstants.CATALOG_FAMILY, qualifier, value);
    ht1.put(put);
    ht2.put(put);
    Get get = new Get(row);
    get.addColumn(HConstants.CATALOG_FAMILY, qualifier);
    ht1.get(get);
    ht2.get(get);

    this.admin.disableTables("testDisableAndEnableTable.*");

    // Test that tables are disabled
    get = new Get(row);
    get.addColumn(HConstants.CATALOG_FAMILY, qualifier);
    boolean ok = false;
    try {
      ht1.get(get);
      ht2.get(get);
    } catch (org.apache.hadoop.hbase.DoNotRetryIOException e) {
      ok = true;
    }

    assertEquals(TableState.State.DISABLED, getStateFromMeta(table1));
    assertEquals(TableState.State.DISABLED, getStateFromMeta(table2));

    assertTrue(ok);
    this.admin.enableTables("testDisableAndEnableTable.*");

    // Test that tables are enabled
    try {
      ht1.get(get);
    } catch (IOException e) {
      ok = false;
    }
    try {
      ht2.get(get);
    } catch (IOException e) {
      ok = false;
    }
    assertTrue(ok);

    ht1.close();
    ht2.close();

    assertEquals(TableState.State.ENABLED, getStateFromMeta(table1));
    assertEquals(TableState.State.ENABLED, getStateFromMeta(table2));
  }

  @Test
  public void testCreateTable() throws IOException {
    HTableDescriptor [] tables = admin.listTables();
    int numTables = tables.length;
    final TableName tableName = TableName.valueOf(name.getMethodName());
    TEST_UTIL.createTable(tableName, HConstants.CATALOG_FAMILY).close();
    tables = this.admin.listTables();
    assertEquals(numTables + 1, tables.length);
    assertTrue("Table must be enabled.",
        TEST_UTIL.getHBaseCluster().getMaster().getTableStateManager()
            .isTableState(tableName, TableState.State.ENABLED));
    assertEquals(TableState.State.ENABLED, getStateFromMeta(tableName));
  }

  @Test
  public void testTruncateTable() throws IOException {
    testTruncateTable(TableName.valueOf(name.getMethodName()), false);
  }

  @Test
  public void testTruncateTablePreservingSplits() throws IOException {
    testTruncateTable(TableName.valueOf(name.getMethodName()), true);
  }

  private void testTruncateTable(final TableName tableName, boolean preserveSplits)
      throws IOException {
    byte[][] splitKeys = new byte[2][];
    splitKeys[0] = Bytes.toBytes(4);
    splitKeys[1] = Bytes.toBytes(8);

    // Create & Fill the table
    Table table = TEST_UTIL.createTable(tableName, HConstants.CATALOG_FAMILY, splitKeys);
    try {
      TEST_UTIL.loadNumericRows(table, HConstants.CATALOG_FAMILY, 0, 10);
      assertEquals(10, TEST_UTIL.countRows(table));
    } finally {
      table.close();
    }
    assertEquals(3, TEST_UTIL.getHBaseCluster().getRegions(tableName).size());

    // Truncate & Verify
    this.admin.disableTable(tableName);
    this.admin.truncateTable(tableName,
        preserveSplits);
    table = TEST_UTIL.getConnection().getTable(tableName);
    try {
      assertEquals(0, TEST_UTIL.countRows(table));
    } finally {
      table.close();
    }
    if (preserveSplits) {
      assertEquals(3, TEST_UTIL.getHBaseCluster().getRegions(tableName).size());
    } else {
      assertEquals(1, TEST_UTIL.getHBaseCluster().getRegions(tableName).size());
    }
  }

  @Test
  public void testGetTableDescriptor() throws IOException {
    HColumnDescriptor fam1 = new HColumnDescriptor("fam1");
    HColumnDescriptor fam2 = new HColumnDescriptor("fam2");
    HColumnDescriptor fam3 = new HColumnDescriptor("fam3");
    HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(name.getMethodName()));
    htd.addFamily(fam1);
    htd.addFamily(fam2);
    htd.addFamily(fam3);
    this.admin.createTable(htd);
    Table table = TEST_UTIL.getConnection().getTable(htd.getTableName());
    TableDescriptor confirmedHtd = table.getDescriptor();
    assertEquals(0, TableDescriptor.COMPARATOR.compare(htd, confirmedHtd));
    MetaTableAccessor.fullScanMetaAndPrint(TEST_UTIL.getConnection());
    table.close();
  }

  @Test
  public void testCompactionTimestamps() throws Exception {
    HColumnDescriptor fam1 = new HColumnDescriptor("fam1");
    final TableName tableName = TableName.valueOf(name.getMethodName());
    HTableDescriptor htd = new HTableDescriptor(tableName);
    htd.addFamily(fam1);
    this.admin.createTable(htd);
    Table table = TEST_UTIL.getConnection().getTable(htd.getTableName());
    long ts = this.admin.getLastMajorCompactionTimestamp(tableName);
    assertEquals(0, ts);
    Put p = new Put(Bytes.toBytes("row1"));
    p.addColumn(Bytes.toBytes("fam1"), Bytes.toBytes("fam1"), Bytes.toBytes("fam1"));
    table.put(p);
    ts = this.admin.getLastMajorCompactionTimestamp(tableName);
    // no files written -> no data
    assertEquals(0, ts);

    this.admin.flush(tableName);
    ts = this.admin.getLastMajorCompactionTimestamp(tableName);
    // still 0, we flushed a file, but no major compaction happened
    assertEquals(0, ts);

    byte[] regionName;
    try (RegionLocator l = TEST_UTIL.getConnection().getRegionLocator(tableName)) {
      regionName = l.getAllRegionLocations().get(0).getRegionInfo().getRegionName();
    }
    long ts1 = this.admin.getLastMajorCompactionTimestampForRegion(regionName);
    assertEquals(ts, ts1);
    p = new Put(Bytes.toBytes("row2"));
    p.addColumn(Bytes.toBytes("fam1"), Bytes.toBytes("fam1"), Bytes.toBytes("fam1"));
    table.put(p);
    this.admin.flush(tableName);
    ts = this.admin.getLastMajorCompactionTimestamp(tableName);
    // make sure the region API returns the same value, as the old file is still around
    assertEquals(ts1, ts);

    TEST_UTIL.compact(tableName, true);
    table.put(p);
    // forces a wait for the compaction
    this.admin.flush(tableName);
    ts = this.admin.getLastMajorCompactionTimestamp(tableName);
    // after a compaction our earliest timestamp will have progressed forward
    assertTrue(ts > ts1);

    // region api still the same
    ts1 = this.admin.getLastMajorCompactionTimestampForRegion(regionName);
    assertEquals(ts, ts1);
    table.put(p);
    this.admin.flush(tableName);
    ts = this.admin.getLastMajorCompactionTimestamp(tableName);
    assertEquals(ts, ts1);
    table.close();
  }

  @Test
  public void testHColumnValidName() {
    boolean exceptionThrown;
    try {
      new HColumnDescriptor("\\test\\abc");
    } catch (IllegalArgumentException iae)
    {
      exceptionThrown = true;
      assertTrue(exceptionThrown);
    }
  }

  /**
   * Verify schema change for read only table
   */
  @Test
  public void testReadOnlyTableModify() throws IOException, InterruptedException {
    final TableName tableName = TableName.valueOf(name.getMethodName());
    TEST_UTIL.createTable(tableName, HConstants.CATALOG_FAMILY).close();

    // Make table read only
    TableDescriptor htd = TableDescriptorBuilder.newBuilder(this.admin.getDescriptor(tableName))
        .setReadOnly(true).build();
    admin.modifyTable(htd);

    // try to modify the read only table now
    htd = TableDescriptorBuilder.newBuilder(this.admin.getDescriptor(tableName))
        .setCompactionEnabled(false).build();
    admin.modifyTable(htd);
    // Delete the table
    this.admin.disableTable(tableName);
    this.admin.deleteTable(tableName);
    assertFalse(this.admin.tableExists(tableName));
  }

  @Test(expected = TableNotDisabledException.class)
  public void testModifyRegionReplicasEnabledTable() throws Exception {
    final TableName tableName = TableName.valueOf(name.getMethodName());
    TEST_UTIL.createTable(tableName, HConstants.CATALOG_FAMILY).close();

    // Modify region replication count
    TableDescriptor htd = TableDescriptorBuilder.newBuilder(admin.getDescriptor(tableName))
        .setRegionReplication(3).build();
    try {
      // try to modify the region replication count without disabling the table
      admin.modifyTable(htd);
      fail("Expected an exception");
    } finally {
      // Delete the table
      admin.disableTable(tableName);
      admin.deleteTable(tableName);
      assertFalse(admin.tableExists(tableName));
    }
  }

  /**
   * Verify schema modification takes.
   */
  @Test
  public void testOnlineChangeTableSchema() throws IOException, InterruptedException {
    final TableName tableName = TableName.valueOf(name.getMethodName());
    HTableDescriptor [] tables = admin.listTables();
    int numTables = tables.length;
    TEST_UTIL.createTable(tableName, HConstants.CATALOG_FAMILY).close();
    tables = this.admin.listTables();
    assertEquals(numTables + 1, tables.length);

    // FIRST, do htabledescriptor changes.
    HTableDescriptor htd = this.admin.getTableDescriptor(tableName);
    // Make a copy and assert copy is good.
    HTableDescriptor copy = new HTableDescriptor(htd);
    assertTrue(htd.equals(copy));
    // Now amend the copy. Introduce differences.
    long newFlushSize = htd.getMemStoreFlushSize() / 2;
    if (newFlushSize <= 0) {
      newFlushSize = HTableDescriptor.DEFAULT_MEMSTORE_FLUSH_SIZE / 2;
    }
    copy.setMemStoreFlushSize(newFlushSize);
    final String key = "anyoldkey";
    assertTrue(htd.getValue(key) == null);
    copy.setValue(key, key);
    boolean expectedException = false;
    try {
      admin.modifyTable(tableName, copy);
    } catch (TableNotDisabledException re) {
      expectedException = true;
    }
    assertFalse(expectedException);
    HTableDescriptor modifiedHtd = new HTableDescriptor(this.admin.getTableDescriptor(tableName));
    assertFalse(htd.equals(modifiedHtd));
    assertTrue(copy.equals(modifiedHtd));
    assertEquals(newFlushSize, modifiedHtd.getMemStoreFlushSize());
    assertEquals(key, modifiedHtd.getValue(key));

    // Now work on column family changes.
    int countOfFamilies = modifiedHtd.getFamilies().size();
    assertTrue(countOfFamilies > 0);
    HColumnDescriptor hcd = modifiedHtd.getFamilies().iterator().next();
    int maxversions = hcd.getMaxVersions();
    final int newMaxVersions = maxversions + 1;
    hcd.setMaxVersions(newMaxVersions);
    final byte [] hcdName = hcd.getName();
    expectedException = false;
    try {
      this.admin.modifyColumnFamily(tableName, hcd);
    } catch (TableNotDisabledException re) {
      expectedException = true;
    }
    assertFalse(expectedException);
    modifiedHtd = this.admin.getTableDescriptor(tableName);
    HColumnDescriptor modifiedHcd = modifiedHtd.getFamily(hcdName);
    assertEquals(newMaxVersions, modifiedHcd.getMaxVersions());

    // Try adding a column
    assertFalse(this.admin.isTableDisabled(tableName));
    final String xtracolName = "xtracol";
    HColumnDescriptor xtracol = new HColumnDescriptor(xtracolName);
    xtracol.setValue(xtracolName, xtracolName);
    expectedException = false;
    try {
      this.admin.addColumnFamily(tableName, xtracol);
    } catch (TableNotDisabledException re) {
      expectedException = true;
    }
    // Add column should work even if the table is enabled
    assertFalse(expectedException);
    modifiedHtd = this.admin.getTableDescriptor(tableName);
    hcd = modifiedHtd.getFamily(xtracol.getName());
    assertTrue(hcd != null);
    assertTrue(hcd.getValue(xtracolName).equals(xtracolName));

    // Delete the just-added column.
    this.admin.deleteColumnFamily(tableName, xtracol.getName());
    modifiedHtd = this.admin.getTableDescriptor(tableName);
    hcd = modifiedHtd.getFamily(xtracol.getName());
    assertTrue(hcd == null);

    // Delete the table
    this.admin.disableTable(tableName);
    this.admin.deleteTable(tableName);
    this.admin.listTables();
    assertFalse(this.admin.tableExists(tableName));
  }

  /**
   * Asserts that the table's regions are spread evenly across the region servers: every server
   * hosts either floor(expectedRegions / numRS) or ceil(expectedRegions / numRS) regions.
   */
  protected void verifyRoundRobinDistribution(ClusterConnection c, RegionLocator regionLocator, int
      expectedRegions) throws IOException {
    int numRS = c.getCurrentNrHRS();
    List<HRegionLocation> regions = regionLocator.getAllRegionLocations();
    Map<ServerName, List<RegionInfo>> server2Regions = new HashMap<>();
    for (HRegionLocation loc : regions) {
      ServerName server = loc.getServerName();
      List<RegionInfo> regs = server2Regions.get(server);
      if (regs == null) {
        regs = new ArrayList<>();
        server2Regions.put(server, regs);
      }
      regs.add(loc.getRegionInfo());
    }
    boolean tablesOnMaster = LoadBalancer.isTablesOnMaster(TEST_UTIL.getConfiguration());
    if (tablesOnMaster) {
      // Ignore the master region server,
      // which contains fewer regions by intention.
      numRS--;
    }
    float average = (float) expectedRegions / numRS;
    int min = (int) Math.floor(average);
    int max = (int) Math.ceil(average);
    for (List<RegionInfo> regionList : server2Regions.values()) {
      assertTrue("numRS=" + numRS + ", min=" + min + ", max=" + max +
          ", size=" + regionList.size() + ", tablesOnMaster=" + tablesOnMaster,
          regionList.size() == min || regionList.size() == max);
    }
  }

  @Test
  public void testCreateTableNumberOfRegions() throws IOException, InterruptedException {
    final TableName tableName = TableName.valueOf(name.getMethodName());
    HTableDescriptor desc = new HTableDescriptor(tableName);
    desc.addFamily(new HColumnDescriptor(HConstants.CATALOG_FAMILY));
    admin.createTable(desc);
    List<HRegionLocation> regions;
    try (RegionLocator l = TEST_UTIL.getConnection().getRegionLocator(tableName)) {
      regions = l.getAllRegionLocations();
      assertEquals("Table should have only 1 region", 1, regions.size());
    }

    TableName TABLE_2 = TableName.valueOf(tableName.getNameAsString() + "_2");
    desc = new HTableDescriptor(TABLE_2);
    desc.addFamily(new HColumnDescriptor(HConstants.CATALOG_FAMILY));
    admin.createTable(desc, new byte[][]{new byte[]{42}});
    try (RegionLocator l = TEST_UTIL.getConnection().getRegionLocator(TABLE_2)) {
      regions = l.getAllRegionLocations();
      assertEquals("Table should have only 2 regions", 2, regions.size());
    }

    TableName TABLE_3 = TableName.valueOf(tableName.getNameAsString() + "_3");
    desc = new HTableDescriptor(TABLE_3);
    desc.addFamily(new HColumnDescriptor(HConstants.CATALOG_FAMILY));
    admin.createTable(desc, "a".getBytes(), "z".getBytes(), 3);
    try (RegionLocator l = TEST_UTIL.getConnection().getRegionLocator(TABLE_3)) {
      regions = l.getAllRegionLocations();
      assertEquals("Table should have only 3 regions", 3, regions.size());
    }

    TableName TABLE_4 = TableName.valueOf(tableName.getNameAsString() + "_4");
    desc = new HTableDescriptor(TABLE_4);
    desc.addFamily(new HColumnDescriptor(HConstants.CATALOG_FAMILY));
    try {
      admin.createTable(desc, "a".getBytes(), "z".getBytes(), 2);
      fail("Should not be able to create a table with only 2 regions using this API.");
    } catch (IllegalArgumentException eae) {
      // Expected
    }

    TableName TABLE_5 = TableName.valueOf(tableName.getNameAsString() + "_5");
    desc = new HTableDescriptor(TABLE_5);
    desc.addFamily(new HColumnDescriptor(HConstants.CATALOG_FAMILY));
    admin.createTable(desc, new byte[] { 1 }, new byte[] { 127 }, 16);
    try (RegionLocator l = TEST_UTIL.getConnection().getRegionLocator(TABLE_5)) {
      regions = l.getAllRegionLocations();
      assertEquals("Table should have 16 regions", 16, regions.size());
    }
  }

  @Test
  public void testCreateTableWithRegions() throws IOException, InterruptedException {
    final TableName tableName = TableName.valueOf(name.getMethodName());

    byte [][] splitKeys = {
        new byte [] { 1, 1, 1 },
        new byte [] { 2, 2, 2 },
        new byte [] { 3, 3, 3 },
        new byte [] { 4, 4, 4 },
        new byte [] { 5, 5, 5 },
        new byte [] { 6, 6, 6 },
        new byte [] { 7, 7, 7 },
        new byte [] { 8, 8, 8 },
        new byte [] { 9, 9, 9 },
    };
    int expectedRegions = splitKeys.length + 1;

    HTableDescriptor desc = new HTableDescriptor(tableName);
    desc.addFamily(new HColumnDescriptor(HConstants.CATALOG_FAMILY));
    admin.createTable(desc, splitKeys);

    boolean tableAvailable =
        admin.isTableAvailable(tableName, splitKeys);
    assertTrue("Table should be created with splitKeys + 1 rows in META", tableAvailable);

    List<HRegionLocation> regions;
    Iterator<HRegionLocation> hris;
    RegionInfo hri;
    ClusterConnection conn = (ClusterConnection) TEST_UTIL.getConnection();
    try (RegionLocator l = TEST_UTIL.getConnection().getRegionLocator(tableName)) {
      regions = l.getAllRegionLocations();

      assertEquals("Tried to create " + expectedRegions + " regions " +
          "but only found " + regions.size(), expectedRegions, regions.size());
      System.err.println("Found " + regions.size() + " regions");

      hris = regions.iterator();
      hri = hris.next().getRegionInfo();
      assertTrue(hri.getStartKey() == null || hri.getStartKey().length == 0);
      assertTrue(Bytes.equals(hri.getEndKey(), splitKeys[0]));
      hri = hris.next().getRegionInfo();
      assertTrue(Bytes.equals(hri.getStartKey(), splitKeys[0]));
      assertTrue(Bytes.equals(hri.getEndKey(), splitKeys[1]));
      hri = hris.next().getRegionInfo();
      assertTrue(Bytes.equals(hri.getStartKey(), splitKeys[1]));
      assertTrue(Bytes.equals(hri.getEndKey(), splitKeys[2]));
      hri = hris.next().getRegionInfo();
      assertTrue(Bytes.equals(hri.getStartKey(), splitKeys[2]));
      assertTrue(Bytes.equals(hri.getEndKey(), splitKeys[3]));
      hri = hris.next().getRegionInfo();
      assertTrue(Bytes.equals(hri.getStartKey(), splitKeys[3]));
      assertTrue(Bytes.equals(hri.getEndKey(), splitKeys[4]));
      hri = hris.next().getRegionInfo();
      assertTrue(Bytes.equals(hri.getStartKey(), splitKeys[4]));
      assertTrue(Bytes.equals(hri.getEndKey(), splitKeys[5]));
      hri = hris.next().getRegionInfo();
      assertTrue(Bytes.equals(hri.getStartKey(), splitKeys[5]));
      assertTrue(Bytes.equals(hri.getEndKey(), splitKeys[6]));
      hri = hris.next().getRegionInfo();
      assertTrue(Bytes.equals(hri.getStartKey(), splitKeys[6]));
      assertTrue(Bytes.equals(hri.getEndKey(), splitKeys[7]));
      hri = hris.next().getRegionInfo();
      assertTrue(Bytes.equals(hri.getStartKey(), splitKeys[7]));
      assertTrue(Bytes.equals(hri.getEndKey(), splitKeys[8]));
      hri = hris.next().getRegionInfo();
      assertTrue(Bytes.equals(hri.getStartKey(), splitKeys[8]));
      assertTrue(hri.getEndKey() == null || hri.getEndKey().length == 0);

      verifyRoundRobinDistribution(conn, l, expectedRegions);
    }

    // Now test using start/end with a number of regions

    // Use 80 bit numbers to make sure we aren't limited
    byte [] startKey = { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1 };
    byte [] endKey = { 9, 9, 9, 9, 9, 9, 9, 9, 9, 9 };

    // Splitting into 10 regions, we expect (null,1) ...
    // (9, null)
    // with (1,2) (2,3) (3,4) (4,5) (5,6) (6,7) (7,8) (8,9) in the middle

    expectedRegions = 10;

    TableName TABLE_2 = TableName.valueOf(tableName.getNameAsString() + "_2");

    desc = new HTableDescriptor(TABLE_2);
    desc.addFamily(new HColumnDescriptor(HConstants.CATALOG_FAMILY));
    admin = TEST_UTIL.getAdmin();
    admin.createTable(desc, startKey, endKey, expectedRegions);

    try (RegionLocator l = TEST_UTIL.getConnection().getRegionLocator(TABLE_2)) {
      regions = l.getAllRegionLocations();
      assertEquals("Tried to create " + expectedRegions + " regions " +
          "but only found " + regions.size(), expectedRegions, regions.size());
      System.err.println("Found " + regions.size() + " regions");

      hris = regions.iterator();
      hri = hris.next().getRegionInfo();
      assertTrue(hri.getStartKey() == null || hri.getStartKey().length == 0);
      assertTrue(Bytes.equals(hri.getEndKey(), new byte[] { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1 }));
      hri = hris.next().getRegionInfo();
      assertTrue(Bytes.equals(hri.getStartKey(), new byte[] { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1 }));
      assertTrue(Bytes.equals(hri.getEndKey(), new byte[] { 2, 2, 2, 2, 2, 2, 2, 2, 2, 2 }));
      hri = hris.next().getRegionInfo();
      assertTrue(Bytes.equals(hri.getStartKey(), new byte[] { 2, 2, 2, 2, 2, 2, 2, 2, 2, 2 }));
      assertTrue(Bytes.equals(hri.getEndKey(), new byte[] { 3, 3, 3, 3, 3, 3, 3, 3, 3, 3 }));
      hri = hris.next().getRegionInfo();
      assertTrue(Bytes.equals(hri.getStartKey(), new byte[] { 3, 3, 3, 3, 3, 3, 3, 3, 3, 3 }));
      assertTrue(Bytes.equals(hri.getEndKey(), new byte[] { 4, 4, 4, 4, 4, 4, 4, 4, 4, 4 }));
      hri = hris.next().getRegionInfo();
      assertTrue(Bytes.equals(hri.getStartKey(), new byte[] { 4, 4, 4, 4, 4, 4, 4, 4, 4, 4 }));
      assertTrue(Bytes.equals(hri.getEndKey(), new byte[] { 5, 5, 5, 5, 5, 5, 5, 5, 5, 5 }));
      hri = hris.next().getRegionInfo();
      assertTrue(Bytes.equals(hri.getStartKey(), new byte[] { 5, 5, 5, 5, 5, 5, 5, 5, 5, 5 }));
      assertTrue(Bytes.equals(hri.getEndKey(), new byte[] { 6, 6, 6, 6, 6, 6, 6, 6, 6, 6 }));
      hri = hris.next().getRegionInfo();
      assertTrue(Bytes.equals(hri.getStartKey(), new byte[] { 6, 6, 6, 6, 6, 6, 6, 6, 6, 6 }));
      assertTrue(Bytes.equals(hri.getEndKey(), new byte[] { 7, 7, 7, 7, 7, 7, 7, 7, 7, 7 }));
      hri = hris.next().getRegionInfo();
      assertTrue(Bytes.equals(hri.getStartKey(), new byte[] { 7, 7, 7, 7, 7, 7, 7, 7, 7, 7 }));
      assertTrue(Bytes.equals(hri.getEndKey(), new byte[] { 8, 8, 8, 8, 8, 8, 8, 8, 8, 8 }));
      hri = hris.next().getRegionInfo();
      assertTrue(Bytes.equals(hri.getStartKey(), new byte[] { 8, 8, 8, 8, 8, 8, 8, 8, 8, 8 }));
      assertTrue(Bytes.equals(hri.getEndKey(), new byte[] { 9, 9, 9, 9, 9, 9, 9, 9, 9, 9 }));
      hri = hris.next().getRegionInfo();
      assertTrue(Bytes.equals(hri.getStartKey(), new byte[] { 9, 9, 9, 9, 9, 9, 9, 9, 9, 9 }));
      assertTrue(hri.getEndKey() == null || hri.getEndKey().length == 0);

      verifyRoundRobinDistribution(conn, l, expectedRegions);
    }

    // Try once more with something that divides into something infinite

    startKey = new byte [] { 0, 0, 0, 0, 0, 0 };
    endKey = new byte [] { 1, 0, 0, 0, 0, 0 };

    expectedRegions = 5;

    TableName TABLE_3 = TableName.valueOf(tableName.getNameAsString() + "_3");

    desc = new HTableDescriptor(TABLE_3);
    desc.addFamily(new HColumnDescriptor(HConstants.CATALOG_FAMILY));
    admin = TEST_UTIL.getAdmin();
    admin.createTable(desc, startKey, endKey,
        expectedRegions);

    try (RegionLocator l = TEST_UTIL.getConnection().getRegionLocator(TABLE_3)) {
      regions = l.getAllRegionLocations();
      assertEquals("Tried to create " + expectedRegions + " regions " +
          "but only found " + regions.size(), expectedRegions, regions.size());
      System.err.println("Found " + regions.size() + " regions");

      verifyRoundRobinDistribution(conn, l, expectedRegions);
    }

    // Try an invalid case where there are duplicate split keys
    splitKeys = new byte [][] {
        new byte [] { 1, 1, 1 },
        new byte [] { 2, 2, 2 },
        new byte [] { 3, 3, 3 },
        new byte [] { 2, 2, 2 }
    };

    TableName TABLE_4 = TableName.valueOf(tableName.getNameAsString() + "_4");
    desc = new HTableDescriptor(TABLE_4);
    desc.addFamily(new HColumnDescriptor(HConstants.CATALOG_FAMILY));
    try {
      admin.createTable(desc, splitKeys);
      assertTrue("Should not be able to create this table because of " +
          "duplicate split keys", false);
    } catch (IllegalArgumentException iae) {
      // Expected
    }
  }

  @Test
  public void testTableAvailableWithRandomSplitKeys() throws Exception {
    final TableName tableName = TableName.valueOf(name.getMethodName());
    HTableDescriptor desc = new HTableDescriptor(tableName);
    desc.addFamily(new HColumnDescriptor("col"));
    byte[][] splitKeys = new byte[1][];
    splitKeys = new byte [][] {
        new byte [] { 1, 1, 1 },
        new byte [] { 2, 2, 2 }
    };
    admin.createTable(desc);
    boolean tableAvailable = admin.isTableAvailable(tableName, splitKeys);
    assertFalse("Table should be created with 1 row in META", tableAvailable);
  }

  @Test
  public void testCreateTableWithOnlyEmptyStartRow() throws IOException {
    final byte[] tableName = Bytes.toBytes(name.getMethodName());
    byte[][] splitKeys = new byte[1][];
    splitKeys[0] = HConstants.EMPTY_BYTE_ARRAY;
    HTableDescriptor desc = new HTableDescriptor(TableName.valueOf(tableName));
    desc.addFamily(new HColumnDescriptor("col"));
    try {
      admin.createTable(desc, splitKeys);
      fail("Test case should fail as empty split key is passed.");
    } catch (IllegalArgumentException e) {
    }
  }

  @Test
  public void testCreateTableWithEmptyRowInTheSplitKeys() throws IOException {
    final byte[] tableName = Bytes.toBytes(name.getMethodName());
    byte[][] splitKeys = new byte[3][];
    splitKeys[0] = "region1".getBytes();
    splitKeys[1] = HConstants.EMPTY_BYTE_ARRAY;
    splitKeys[2] = "region2".getBytes();
    HTableDescriptor desc = new HTableDescriptor(TableName.valueOf(tableName));
    desc.addFamily(new HColumnDescriptor("col"));
    try {
      admin.createTable(desc, splitKeys);
      fail("Test case should fail as empty split key is passed.");
    } catch (IllegalArgumentException e) {
      LOG.info("Expected ", e);
    }
  }

  @Test
  public void testTableExist() throws IOException {
    final TableName table = TableName.valueOf(name.getMethodName());
    boolean exist;
    exist = this.admin.tableExists(table);
    assertEquals(false, exist);
    TEST_UTIL.createTable(table, HConstants.CATALOG_FAMILY);
    exist = this.admin.tableExists(table);
    assertEquals(true, exist);
  }

  /**
   * Tests forcing split from client and having scanners successfully ride over split.
   * @throws Exception
   * @throws IOException
   */
  @Test
  public void testForceSplit() throws Exception {
    byte[][] familyNames = new byte[][] { Bytes.toBytes("cf") };
    int[] rowCounts = new int[] { 6000 };
    int numVersions = HColumnDescriptor.DEFAULT_VERSIONS;
    int blockSize = 256;
    splitTest(null, familyNames, rowCounts, numVersions, blockSize, true);

    byte[] splitKey = Bytes.toBytes(3500);
    splitTest(splitKey, familyNames, rowCounts, numVersions, blockSize, true);
    // test regionSplitSync
    splitTest(splitKey, familyNames, rowCounts, numVersions, blockSize, false);
  }

  /**
   * Test retain assignment on enableTable.
   *
   * @throws IOException
   */
  @Test
  public void testEnableTableRetainAssignment() throws IOException {
    final TableName tableName = TableName.valueOf(name.getMethodName());
    byte[][] splitKeys = { new byte[] { 1, 1, 1 }, new byte[] { 2, 2, 2 },
        new byte[] { 3, 3, 3 }, new byte[] { 4, 4, 4 }, new byte[] { 5, 5, 5 },
        new byte[] { 6, 6, 6 }, new byte[] { 7, 7, 7 }, new byte[] { 8, 8, 8 },
        new byte[] { 9, 9, 9 } };
    int expectedRegions = splitKeys.length + 1;
    HTableDescriptor desc = new HTableDescriptor(tableName);
    desc.addFamily(new HColumnDescriptor(HConstants.CATALOG_FAMILY));
    admin.createTable(desc, splitKeys);

    try (RegionLocator l = TEST_UTIL.getConnection().getRegionLocator(tableName)) {
      List<HRegionLocation> regions = l.getAllRegionLocations();

      assertEquals(
          "Tried to create " + expectedRegions + " regions " + "but only found " + regions.size(),
          expectedRegions, regions.size());
      // Disable table.
      admin.disableTable(tableName);
      // Enable table, use retain assignment to assign regions.
      admin.enableTable(tableName);
      List<HRegionLocation> regions2 = l.getAllRegionLocations();

      // Check the assignment.
      assertEquals(regions.size(), regions2.size());
      assertTrue(regions2.containsAll(regions));
    }
  }

  /**
   * Multi-family scenario. Tests forcing split from client and
   * having scanners successfully ride over split.
   * @throws Exception
   * @throws IOException
   */
  @Test
  public void testForceSplitMultiFamily() throws Exception {
    int numVersions = HColumnDescriptor.DEFAULT_VERSIONS;

    // use small HFile block size so that we can have lots of blocks in HFile
    // Otherwise, if there is only one block,
    // HFileBlockIndex.midKey()'s value == startKey
    int blockSize = 256;
    byte[][] familyNames = new byte[][] { Bytes.toBytes("cf1"),
        Bytes.toBytes("cf2") };

    // one of the column families isn't splittable
    int[] rowCounts = new int[] { 6000, 1 };
    splitTest(null, familyNames, rowCounts, numVersions, blockSize, true);

    rowCounts = new int[] { 1, 6000 };
    splitTest(null, familyNames, rowCounts, numVersions, blockSize, true);

    // one column family has much smaller data than the other
    // the split key should be based on the largest column family
    rowCounts = new int[] { 6000, 300 };
    splitTest(null, familyNames, rowCounts, numVersions, blockSize, true);

    rowCounts = new int[] { 300, 6000 };
    splitTest(null, familyNames, rowCounts, numVersions, blockSize, true);

  }

  /**
   * Creates a table, loads rowCounts[i] rows into familyNames[i], forces a split (async or sync)
   * while a scanner is open, and verifies the scan rides over the split and the resulting two
   * regions split where expected.
   */
  void splitTest(byte[] splitPoint, byte[][] familyNames, int[] rowCounts,
      int numVersions, int blockSize, boolean async) throws Exception {
    TableName tableName = TableName.valueOf("testForceSplit");
    StringBuilder sb = new StringBuilder();
    // Add tail to String so can see better in logs where a test is running.
    for (int i = 0; i < rowCounts.length; i++) {
      sb.append("_").append(Integer.toString(rowCounts[i]));
    }
    assertFalse(admin.tableExists(tableName));
    try (final Table table = TEST_UTIL.createTable(tableName, familyNames,
        numVersions, blockSize);
        final RegionLocator locator = TEST_UTIL.getConnection().getRegionLocator(tableName)) {

      int rowCount = 0;
      byte[] q = new byte[0];

      // insert rows into column families. The number of rows that have values
      // in a specific column family is decided by rowCounts[familyIndex]
      for (int index = 0; index < familyNames.length; index++) {
        ArrayList<Put> puts = new ArrayList<>(rowCounts[index]);
        for (int i = 0; i < rowCounts[index]; i++) {
          byte[] k = Bytes.toBytes(i);
          Put put = new Put(k);
          put.addColumn(familyNames[index], q, k);
          puts.add(put);
        }
        table.put(puts);

        if (rowCount < rowCounts[index]) {
          rowCount = rowCounts[index];
        }
      }

      // get the initial layout (should just be one region)
      List<HRegionLocation> m = locator.getAllRegionLocations();
      LOG.info("Initial regions (" + m.size() + "): " + m);
      assertTrue(m.size() == 1);

      // Verify row count
      Scan scan = new Scan();
      ResultScanner scanner = table.getScanner(scan);
      int rows = 0;
      for (@SuppressWarnings("unused") Result result : scanner) {
        rows++;
      }
      scanner.close();
      assertEquals(rowCount, rows);

      // Have an outstanding scan going on to make sure we can scan over splits.
      scan = new Scan();
      scanner = table.getScanner(scan);
      // Scan first row so we are into first region before split happens.
      scanner.next();

      // Split the table
      if (async) {
        this.admin.split(tableName, splitPoint);
        final AtomicInteger count = new AtomicInteger(0);
        Thread t = new Thread("CheckForSplit") {
          @Override public void run() {
            for (int i = 0; i < 45; i++) {
              try {
                sleep(1000);
              } catch (InterruptedException e) {
                continue;
              }
              // check again
              List<HRegionLocation> regions = null;
              try {
                regions = locator.getAllRegionLocations();
              } catch (IOException e) {
                e.printStackTrace();
              }
              if (regions == null) continue;
              count.set(regions.size());
              if (count.get() >= 2) {
                LOG.info("Found: " + regions);
                break;
              }
              LOG.debug("Cycle waiting on split");
            }
            LOG.debug("CheckForSplit thread exited, current region count: " + count.get());
          }
        };
        t.setPriority(Thread.NORM_PRIORITY - 2);
        t.start();
        t.join();
      } else {
        // Sync split region, no need to create a thread to check
        ((HBaseAdmin)admin).splitRegionSync(m.get(0).getRegionInfo().getRegionName(), splitPoint);
      }

      // Verify row count
      rows = 1; // We counted one row above.
      for (@SuppressWarnings("unused") Result result : scanner) {
        rows++;
        if (rows > rowCount) {
          scanner.close();
          assertTrue("Scanned more than expected (" + rowCount + ")", false);
        }
      }
      scanner.close();
      assertEquals(rowCount, rows);

      List<HRegionLocation> regions = null;
      try {
        regions = locator.getAllRegionLocations();
      } catch (IOException e) {
        e.printStackTrace();
      }
      assertEquals(2, regions.size());
      if (splitPoint != null) {
        // make sure the split point matches our explicit configuration
        assertEquals(Bytes.toString(splitPoint),
            Bytes.toString(regions.get(0).getRegionInfo().getEndKey()));
        assertEquals(Bytes.toString(splitPoint),
            Bytes.toString(regions.get(1).getRegionInfo().getStartKey()));
        LOG.debug("Properly split on " + Bytes.toString(splitPoint));
      } else {
        if (familyNames.length > 1) {
          int splitKey = Bytes.toInt(regions.get(0).getRegionInfo().getEndKey());
          // check if splitKey is based on the largest column family
          // in terms of its store size
          int deltaForLargestFamily = Math.abs(rowCount / 2 - splitKey);
          LOG.debug("SplitKey=" + splitKey + "&deltaForLargestFamily=" + deltaForLargestFamily +
              ", r=" + regions.get(0).getRegionInfo());
          for (int index = 0; index < familyNames.length; index++) {
            int delta = Math.abs(rowCounts[index] / 2 - splitKey);
            if (delta < deltaForLargestFamily) {
              assertTrue("Delta " + delta + " for family " + index + " should be at least "
                  + "deltaForLargestFamily " + deltaForLargestFamily, false);
            }
          }
        }
      }
      TEST_UTIL.deleteTable(tableName);
    }
  }

  @Test
  public void testSplitAndMergeWithReplicaTable() throws Exception {
    // The test tries to directly split replica regions and directly merge replica regions. These
    // are not allowed. The test validates that. Then the test does a valid split/merge of allowed
    // regions.
    // Set up a table with 3 regions and replication set to 3
    final TableName tableName = TableName.valueOf(name.getMethodName());
    HTableDescriptor desc = new HTableDescriptor(tableName);
    desc.setRegionReplication(3);
    byte[] cf = "f".getBytes();
    HColumnDescriptor hcd = new HColumnDescriptor(cf);
    desc.addFamily(hcd);
    byte[][] splitRows = new byte[2][];
    splitRows[0] = new byte[]{(byte)'4'};
    splitRows[1] = new byte[]{(byte)'7'};
    TEST_UTIL.getAdmin().createTable(desc, splitRows);
    List<HRegion> oldRegions;
    do {
      oldRegions = TEST_UTIL.getHBaseCluster().getRegions(tableName);
      Thread.sleep(10);
    } while (oldRegions.size() != 9); //3 regions * 3 replicas
    // write some data to the table
    Table ht = TEST_UTIL.getConnection().getTable(tableName);
    List<Put> puts = new ArrayList<>();
    byte[] qualifier = "c".getBytes();
    Put put = new Put(new byte[]{(byte)'1'});
    put.addColumn(cf, qualifier, "100".getBytes());
    puts.add(put);
    put = new Put(new byte[]{(byte)'6'});
    put.addColumn(cf, qualifier, "100".getBytes());
    puts.add(put);
    put = new Put(new byte[]{(byte)'8'});
    put.addColumn(cf, qualifier, "100".getBytes());
    puts.add(put);
    ht.put(puts);
    ht.close();
    List<Pair<RegionInfo, ServerName>> regions =
        MetaTableAccessor.getTableRegionsAndLocations(TEST_UTIL.getConnection(), tableName);
    boolean gotException = false;
    // the element at index 1 would be a replica (since the metareader gives us ordered
    // regions). Try splitting that region via the split API. Should fail
    try {
      TEST_UTIL.getAdmin().splitRegion(regions.get(1).getFirst().getRegionName());
    } catch (IllegalArgumentException ex) {
      gotException = true;
    }
    assertTrue(gotException);
    gotException = false;
    // the element at index 1 would be a replica (since the metareader gives us ordered
    // regions). Try splitting that region via a different split API (the difference is
    // this API goes direct to the regionserver skipping any checks in the admin). Should fail
    try {
      TEST_UTIL.getHBaseAdmin().splitRegionAsync(regions.get(1).getFirst(),
          new byte[]{(byte)'1'});
    } catch (IOException ex) {
      gotException = true;
    }
    assertTrue(gotException);

    gotException = false;
    //testing Sync split operation
    try {
      TEST_UTIL.getHBaseAdmin().splitRegionSync(regions.get(1).getFirst().getRegionName(),
          new byte[]{(byte)'1'});
    } catch (IllegalArgumentException ex) {
      gotException = true;
    }
    assertTrue(gotException);

    gotException = false;
    // Try merging a replica with another. Should fail.
    try {
      TEST_UTIL.getHBaseAdmin().mergeRegionsSync(
          regions.get(1).getFirst().getEncodedNameAsBytes(),
          regions.get(2).getFirst().getEncodedNameAsBytes(),
          true);
    } catch (IllegalArgumentException m) {
      gotException = true;
    }
    assertTrue(gotException);
    // Try going to the master directly (that will skip the check in admin)
    try {
      byte[][] nameofRegionsToMerge = new byte[2][];
      nameofRegionsToMerge[0] = regions.get(1).getFirst().getEncodedNameAsBytes();
      nameofRegionsToMerge[1] = regions.get(2).getFirst().getEncodedNameAsBytes();
      MergeTableRegionsRequest request = RequestConverter
          .buildMergeTableRegionsRequest(
              nameofRegionsToMerge,
              true,
              HConstants.NO_NONCE,
              HConstants.NO_NONCE);
      ((ClusterConnection) TEST_UTIL.getAdmin().getConnection()).getMaster()
          .mergeTableRegions(null, request);
    } catch (org.apache.hbase.thirdparty.com.google.protobuf.ServiceException m) {
      Throwable t = m.getCause();
      do {
        if (t instanceof MergeRegionException) {
          gotException = true;
          break;
        }
        t = t.getCause();
      } while (t != null);
    }
    assertTrue(gotException);
  }

  @Test (expected=IllegalArgumentException.class)
  public void testInvalidHColumnDescriptor() throws IOException {
    new HColumnDescriptor("/cfamily/name");
  }

  @Test
  public void testEnableDisableAddColumnDeleteColumn() throws Exception {
    final TableName tableName = TableName.valueOf(name.getMethodName());
    TEST_UTIL.createTable(tableName, HConstants.CATALOG_FAMILY).close();
    while (!this.admin.isTableEnabled(TableName.valueOf(name.getMethodName()))) {
      Thread.sleep(10);
    }
    this.admin.disableTable(tableName);
    try {
      TEST_UTIL.getConnection().getTable(tableName);
    } catch (org.apache.hadoop.hbase.DoNotRetryIOException e) {
      //expected
    }

    this.admin.addColumnFamily(tableName, new HColumnDescriptor("col2"));
    this.admin.enableTable(tableName);
    try {
      this.admin.deleteColumnFamily(tableName, Bytes.toBytes("col2"));
    } catch (TableNotDisabledException e) {
      LOG.info(e.toString(), e);
    }
    this.admin.disableTable(tableName);
    this.admin.deleteTable(tableName);
  }

  @Test
  public void testDeleteLastColumnFamily() throws Exception {
    final TableName tableName = TableName.valueOf(name.getMethodName());
    TEST_UTIL.createTable(tableName, HConstants.CATALOG_FAMILY).close();
    while (!this.admin.isTableEnabled(TableName.valueOf(name.getMethodName()))) {
      Thread.sleep(10);
    }

    // test for enabled table
    try {
      this.admin.deleteColumnFamily(tableName, HConstants.CATALOG_FAMILY);
      fail("Should have failed to delete the only column family of a table");
    } catch (InvalidFamilyOperationException ex) {
      // expected
    }

    // test for disabled table
    this.admin.disableTable(tableName);

    try {
      this.admin.deleteColumnFamily(tableName, HConstants.CATALOG_FAMILY);
      fail("Should have failed to delete the only column family of a table");
    } catch (InvalidFamilyOperationException ex) {
      // expected
    }

    this.admin.deleteTable(tableName);
  }

  /*
   * Test DFS replication for column families, where one CF has default replication (3) and the
   * other is set to 1.
   */
  @Test
  public void testHFileReplication() throws Exception {
    final TableName tableName = TableName.valueOf(this.name.getMethodName());
    String fn1 = "rep1";
    HColumnDescriptor hcd1 = new HColumnDescriptor(fn1);
    hcd1.setDFSReplication((short) 1);
    String fn = "defaultRep";
    HColumnDescriptor hcd = new HColumnDescriptor(fn);
    HTableDescriptor htd = new HTableDescriptor(tableName);
    htd.addFamily(hcd);
    htd.addFamily(hcd1);
    Table table = TEST_UTIL.createTable(htd, null);
    TEST_UTIL.waitTableAvailable(tableName);
    Put p = new Put(Bytes.toBytes("defaultRep_rk"));
    byte[] q1 = Bytes.toBytes("q1");
    byte[] v1 = Bytes.toBytes("v1");
    p.addColumn(Bytes.toBytes(fn), q1, v1);
    List<Put> puts = new ArrayList<>(2);
    puts.add(p);
    p = new Put(Bytes.toBytes("rep1_rk"));
    p.addColumn(Bytes.toBytes(fn1), q1, v1);
    puts.add(p);
    try {
      table.put(puts);
      admin.flush(tableName);

      List<HRegion> regions = TEST_UTIL.getMiniHBaseCluster().getRegions(tableName);
      for (HRegion r : regions) {
        HStore store = r.getStore(Bytes.toBytes(fn));
        for (HStoreFile sf : store.getStorefiles()) {
          assertTrue(sf.toString().contains(fn));
          assertTrue("Column family " + fn + " should have 3 copies",
              FSUtils.getDefaultReplication(TEST_UTIL.getTestFileSystem(), sf.getPath()) == (sf
                  .getFileInfo().getFileStatus().getReplication()));
        }

        store = r.getStore(Bytes.toBytes(fn1));
        for (HStoreFile sf : store.getStorefiles()) {
          assertTrue(sf.toString().contains(fn1));
          assertTrue("Column family " + fn1 + " should have only 1 copy", 1 == sf.getFileInfo()
              .getFileStatus().getReplication());
        }
      }
    } finally {
      if (admin.isTableEnabled(tableName)) {
        this.admin.disableTable(tableName);
        this.admin.deleteTable(tableName);
      }
    }
  }

  @Test
  public void testMergeRegions() throws Exception {
    final TableName tableName = TableName.valueOf(name.getMethodName());
    HColumnDescriptor cd = new HColumnDescriptor("d");
    HTableDescriptor td = new HTableDescriptor(tableName);
    td.addFamily(cd);
    byte[][] splitRows = new byte[2][];
    splitRows[0] = new byte[]{(byte)'3'};
    splitRows[1] = new byte[]{(byte)'6'};
    try {
      TEST_UTIL.createTable(td, splitRows);
      TEST_UTIL.waitTableAvailable(tableName);

      List<RegionInfo> tableRegions;
      RegionInfo regionA;
      RegionInfo regionB;

      // merge with full name
      tableRegions = admin.getRegions(tableName);
      assertEquals(3, admin.getTableRegions(tableName).size());
      regionA = tableRegions.get(0);
      regionB = tableRegions.get(1);
      // TODO convert this to version that is synchronous (See HBASE-16668)
      admin.mergeRegionsAsync(regionA.getRegionName(), regionB.getRegionName(), false)
          .get(60, TimeUnit.SECONDS);

      assertEquals(2, admin.getTableRegions(tableName).size());

      // merge with encoded name
      tableRegions = admin.getRegions(tableName);
      regionA = tableRegions.get(0);
      regionB = tableRegions.get(1);
      // TODO convert this to version that is synchronous (See HBASE-16668)
      admin.mergeRegionsAsync(
          regionA.getEncodedNameAsBytes(), regionB.getEncodedNameAsBytes(), false)
          .get(60, TimeUnit.SECONDS);

      assertEquals(1, admin.getTableRegions(tableName).size());
    } finally {
      this.admin.disableTable(tableName);
      this.admin.deleteTable(tableName);
    }
  }

  @Test
  public void testSplitShouldNotHappenIfSplitIsDisabledForTable()
      throws Exception {
    final TableName tableName = TableName.valueOf(name.getMethodName());
    HTableDescriptor htd = new HTableDescriptor(tableName);
    htd.addFamily(new HColumnDescriptor("f"));
    htd.setRegionSplitPolicyClassName(DisabledRegionSplitPolicy.class.getName());
    Table table = TEST_UTIL.createTable(htd, null);
    for (int i = 0; i < 10; i++) {
      Put p = new Put(Bytes.toBytes("row" + i));
      byte[] q1 = Bytes.toBytes("q1");
      byte[] v1 = Bytes.toBytes("v1");
      p.addColumn(Bytes.toBytes("f"), q1, v1);
      table.put(p);
    }
    this.admin.flush(tableName);
    try {
      this.admin.split(tableName, Bytes.toBytes("row5"));
      Threads.sleep(10000);
    } catch (Exception e) {
      // Nothing to do.
    }
    // Split should not happen.
    List<RegionInfo> allRegions = MetaTableAccessor.getTableRegions(
        this.admin.getConnection(), tableName, true);
    assertEquals(1, allRegions.size());
  }

  @Test
  public void testModifyTableOnTableWithRegionReplicas() throws Exception {
    TableName tableName = TableName.valueOf(name.getMethodName());
    TableDescriptor desc = TableDescriptorBuilder.newBuilder(tableName)
        .setColumnFamily(ColumnFamilyDescriptorBuilder.of(Bytes.toBytes("cf")))
        .setRegionReplication(5)
        .build();

    admin.createTable(desc);

    int maxFileSize = 10000000;
    TableDescriptor newDesc = TableDescriptorBuilder.newBuilder(desc)
        .setMaxFileSize(maxFileSize)
        .build();

    admin.modifyTable(newDesc);
    TableDescriptor newTableDesc = admin.getDescriptor(tableName);
    assertEquals(maxFileSize, newTableDesc.getMaxFileSize());
  }
}