/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.quotas;

import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertTrue;

import java.io.IOException;
import java.util.Arrays;
import java.util.Collection;
import java.util.HashSet;
import java.util.Map;
import java.util.Map.Entry;
import java.util.Set;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicLong;
import java.util.concurrent.atomic.AtomicReference;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.HBaseClassTestRule;
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.NamespaceDescriptor;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.Waiter.Predicate;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.RegionInfo;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.SnapshotDescription;
import org.apache.hadoop.hbase.client.SnapshotType;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.master.HMaster;
import org.apache.hadoop.hbase.quotas.SpaceQuotaHelperForTests.NoFilesToDischarge;
import org.apache.hadoop.hbase.quotas.SpaceQuotaHelperForTests.SpaceQuotaSnapshotPredicate;
import org.apache.hadoop.hbase.regionserver.HStore;
import org.apache.hadoop.hbase.snapshot.SnapshotReferenceUtil;
import org.apache.hadoop.hbase.snapshot.SnapshotReferenceUtil.SnapshotVisitor;
import org.apache.hadoop.hbase.testclassification.LargeTests;
import org.junit.AfterClass;
import org.junit.Before;
import org.junit.BeforeClass;
import org.junit.ClassRule;
import org.junit.Rule;
import org.junit.Test;
import org.junit.experimental.categories.Category;
import org.junit.rules.TestName;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import org.apache.hbase.thirdparty.com.google.common.collect.HashMultimap;
import org.apache.hbase.thirdparty.com.google.common.collect.ImmutableMap;
import org.apache.hbase.thirdparty.com.google.common.collect.Multimap;

import org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos.SnapshotRegionManifest.StoreFile;

/**
 * Test class for the {@link SnapshotQuotaObserverChore}.
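 * The tests verify that the chore computes, for tables and namespaces with a space quota, the
 * size of snapshot-referenced store files that are no longer shared with the live table, and that
 * those sizes (and per-namespace totals) are persisted to the quota table and counted against the
 * quota.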
 */
@Category(LargeTests.class)
public class TestSnapshotQuotaObserverChore {

  @ClassRule
  public static final HBaseClassTestRule CLASS_RULE =
    HBaseClassTestRule.forClass(TestSnapshotQuotaObserverChore.class);

  private static final Logger LOG = LoggerFactory.getLogger(TestSnapshotQuotaObserverChore.class);
  private static final HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
  private static final AtomicLong COUNTER = new AtomicLong();

  @Rule
  public TestName testName = new TestName();

  private Connection conn;
  private Admin admin;
  private SpaceQuotaHelperForTests helper;
  private HMaster master;
  private SnapshotQuotaObserverChore testChore;

  @BeforeClass
  public static void setUp() throws Exception {
    Configuration conf = TEST_UTIL.getConfiguration();
    SpaceQuotaHelperForTests.updateConfigForQuotas(conf);
    // Clean up the compacted files faster than normal (15s instead of 2mins)
    conf.setInt("hbase.hfile.compaction.discharger.interval", 15 * 1000);
    TEST_UTIL.startMiniCluster(1);
  }

  @AfterClass
  public static void tearDown() throws Exception {
    TEST_UTIL.shutdownMiniCluster();
  }

  @Before
  public void setup() throws Exception {
    conn = TEST_UTIL.getConnection();
    admin = TEST_UTIL.getAdmin();
    helper = new SpaceQuotaHelperForTests(TEST_UTIL, testName, COUNTER);
    master = TEST_UTIL.getHBaseCluster().getMaster();
    helper.removeAllQuotas(conn);
    testChore = new SnapshotQuotaObserverChore(TEST_UTIL.getConnection(),
      TEST_UTIL.getConfiguration(), master.getFileSystem(), master, null);
  }

  @Test
  public void testSnapshotsFromTables() throws Exception {
    TableName tn1 = helper.createTableWithRegions(1);
    TableName tn2 = helper.createTableWithRegions(1);
    TableName tn3 = helper.createTableWithRegions(1);

    // Set a space quota on tables 1 and 2 (but not 3)
    admin.setQuota(QuotaSettingsFactory.limitTableSpace(tn1, SpaceQuotaHelperForTests.ONE_GIGABYTE,
      SpaceViolationPolicy.NO_INSERTS));
    admin.setQuota(QuotaSettingsFactory.limitTableSpace(tn2, SpaceQuotaHelperForTests.ONE_GIGABYTE,
      SpaceViolationPolicy.NO_INSERTS));

    // Create snapshots on each table (we didn't write any data, so just skipflush)
    admin.snapshot(new SnapshotDescription(tn1 + "snapshot", tn1, SnapshotType.SKIPFLUSH));
    admin.snapshot(new SnapshotDescription(tn2 + "snapshot", tn2, SnapshotType.SKIPFLUSH));
    admin.snapshot(new SnapshotDescription(tn3 + "snapshot", tn3, SnapshotType.SKIPFLUSH));

    Multimap<TableName, String> mapping = testChore.getSnapshotsToComputeSize();
    assertEquals(2, mapping.size());
    assertEquals(1, mapping.get(tn1).size());
    assertEquals(tn1 + "snapshot", mapping.get(tn1).iterator().next());
    assertEquals(1, mapping.get(tn2).size());
    assertEquals(tn2 + "snapshot", mapping.get(tn2).iterator().next());

    admin.snapshot(new SnapshotDescription(tn2 + "snapshot1", tn2, SnapshotType.SKIPFLUSH));
    admin.snapshot(new SnapshotDescription(tn3 + "snapshot1", tn3, SnapshotType.SKIPFLUSH));

    mapping = testChore.getSnapshotsToComputeSize();
    assertEquals(3, mapping.size());
    assertEquals(1, mapping.get(tn1).size());
    assertEquals(tn1 + "snapshot", mapping.get(tn1).iterator().next());
    assertEquals(2, mapping.get(tn2).size());
    assertEquals(new HashSet<String>(Arrays.asList(tn2 + "snapshot", tn2 + "snapshot1")),
      mapping.get(tn2));
  }
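
  /**
   * Snapshots should only be picked up for tables in a namespace carrying a space quota; the
   * namespace and user throttle quotas set below must not cause tn3's snapshots to be sized.
   */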
  @Test
  public void testSnapshotsFromNamespaces() throws Exception {
    NamespaceDescriptor ns = NamespaceDescriptor.create("snapshots_from_namespaces").build();
    admin.createNamespace(ns);

    TableName tn1 = helper.createTableWithRegions(ns.getName(), 1);
    TableName tn2 = helper.createTableWithRegions(ns.getName(), 1);
    TableName tn3 = helper.createTableWithRegions(1);

    // Set a throttle quota on 'default' namespace
    admin.setQuota(QuotaSettingsFactory.throttleNamespace(tn3.getNamespaceAsString(),
      ThrottleType.WRITE_NUMBER, 100, TimeUnit.SECONDS));
    // Set a user throttle quota
    admin.setQuota(
      QuotaSettingsFactory.throttleUser("user", ThrottleType.WRITE_NUMBER, 100, TimeUnit.MINUTES));

    // Set a space quota on the namespace
    admin.setQuota(QuotaSettingsFactory.limitNamespaceSpace(ns.getName(),
      SpaceQuotaHelperForTests.ONE_GIGABYTE, SpaceViolationPolicy.NO_INSERTS));

    // Create snapshots on each table (we didn't write any data, so just skipflush)
    admin.snapshot(new SnapshotDescription(tn1.getQualifierAsString() + "snapshot", tn1,
      SnapshotType.SKIPFLUSH));
    admin.snapshot(new SnapshotDescription(tn2.getQualifierAsString() + "snapshot", tn2,
      SnapshotType.SKIPFLUSH));
    admin.snapshot(new SnapshotDescription(tn3.getQualifierAsString() + "snapshot", tn3,
      SnapshotType.SKIPFLUSH));

    Multimap<TableName, String> mapping = testChore.getSnapshotsToComputeSize();
    assertEquals(2, mapping.size());
    assertEquals(1, mapping.get(tn1).size());
    assertEquals(tn1.getQualifierAsString() + "snapshot", mapping.get(tn1).iterator().next());
    assertEquals(1, mapping.get(tn2).size());
    assertEquals(tn2.getQualifierAsString() + "snapshot", mapping.get(tn2).iterator().next());

    admin.snapshot(new SnapshotDescription(tn2.getQualifierAsString() + "snapshot1", tn2,
      SnapshotType.SKIPFLUSH));
    admin.snapshot(new SnapshotDescription(tn3.getQualifierAsString() + "snapshot2", tn3,
      SnapshotType.SKIPFLUSH));

    mapping = testChore.getSnapshotsToComputeSize();
    assertEquals(3, mapping.size());
    assertEquals(1, mapping.get(tn1).size());
    assertEquals(tn1.getQualifierAsString() + "snapshot", mapping.get(tn1).iterator().next());
    assertEquals(2, mapping.get(tn2).size());
    assertEquals(new HashSet<String>(Arrays.asList(tn2.getQualifierAsString() + "snapshot",
      tn2.getQualifierAsString() + "snapshot1")), mapping.get(tn2));
  }

  @Test
  public void testSnapshotSize() throws Exception {
    // Create a table and set a quota
    TableName tn1 = helper.createTableWithRegions(5);
    admin.setQuota(QuotaSettingsFactory.limitTableSpace(tn1, SpaceQuotaHelperForTests.ONE_GIGABYTE,
      SpaceViolationPolicy.NO_INSERTS));

    // Write some data and flush it
    helper.writeData(tn1, 256L * SpaceQuotaHelperForTests.ONE_KILOBYTE);
    admin.flush(tn1);

    final long snapshotSize = TEST_UTIL.getMiniHBaseCluster().getRegions(tn1).stream()
      .flatMap(r -> r.getStores().stream()).mapToLong(HStore::getHFilesSize).sum();

    // Wait for the Master chore to run to see the usage (with a fudge factor)
    TEST_UTIL.waitFor(30_000, new SpaceQuotaSnapshotPredicate(conn, tn1) {
      @Override
      boolean evaluate(SpaceQuotaSnapshot snapshot) throws Exception {
        return snapshot.getUsage() == snapshotSize;
      }
    });

    // Create a snapshot on the table
    final String snapshotName = tn1 + "snapshot";
    admin.snapshot(new SnapshotDescription(snapshotName, tn1, SnapshotType.SKIPFLUSH));

    // Get the snapshots
    Multimap<TableName, String> snapshotsToCompute = testChore.getSnapshotsToComputeSize();
    assertEquals("Expected to see the single snapshot: " + snapshotsToCompute, 1,
      snapshotsToCompute.size());

    // Get the size of our snapshot
    Map<String, Long> namespaceSnapshotSizes = testChore.computeSnapshotSizes(snapshotsToCompute);
    assertEquals(1, namespaceSnapshotSizes.size());
    Long size = namespaceSnapshotSizes.get(tn1.getNamespaceAsString());
    assertNotNull(size);
    // The snapshot should take up no space since the table still refers to all of its files
    assertEquals(0, size.longValue());

    // Write some more data, flush it, and then major_compact the table
    helper.writeData(tn1, 256L * SpaceQuotaHelperForTests.ONE_KILOBYTE);
    admin.flush(tn1);
    TEST_UTIL.compact(tn1, true);

    // The test table should reflect its original size since the ingest was deterministic
    TEST_UTIL.waitFor(30_000, new SpaceQuotaSnapshotPredicate(conn, tn1) {
      private final long regionSize = TEST_UTIL.getMiniHBaseCluster().getRegions(tn1).stream()
        .flatMap(r -> r.getStores().stream()).mapToLong(HStore::getHFilesSize).sum();

      @Override
      boolean evaluate(SpaceQuotaSnapshot snapshot) throws Exception {
        LOG.debug("Current usage=" + snapshot.getUsage() + " snapshotSize=" + snapshotSize);
        // The usage of table space consists of region size and snapshot size
        return closeInSize(snapshot.getUsage(), snapshotSize + regionSize,
          SpaceQuotaHelperForTests.ONE_KILOBYTE);
      }
    });

    // Wait for no compacted files on the regions of our table
    TEST_UTIL.waitFor(30_000, new NoFilesToDischarge(TEST_UTIL.getMiniHBaseCluster(), tn1));

    // Still should see only one snapshot
    snapshotsToCompute = testChore.getSnapshotsToComputeSize();
    assertEquals("Expected to see the single snapshot: " + snapshotsToCompute, 1,
      snapshotsToCompute.size());
    namespaceSnapshotSizes = testChore.computeSnapshotSizes(snapshotsToCompute);
    assertEquals(1, namespaceSnapshotSizes.size());
    size = namespaceSnapshotSizes.get(tn1.getNamespaceAsString());
    assertNotNull(size);
    // The snapshot should take up the size the table originally took up
    assertEquals(snapshotSize, size.longValue());
  }

  @Test
  public void testPersistingSnapshotsForNamespaces() throws Exception {
    TableName tn1 = TableName.valueOf("ns1:tn1");
    TableName tn2 = TableName.valueOf("ns1:tn2");
    TableName tn3 = TableName.valueOf("ns2:tn1");
    TableName tn4 = TableName.valueOf("ns2:tn2");
    TableName tn5 = TableName.valueOf("tn1");
    // Shim in a custom factory to avoid computing snapshot sizes.
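    // Each table reports a fixed snapshot size; the chore should aggregate these per namespace:
    // ns1 = 1024 + 1024 = 2048, ns2 = 512 + 1024 = 1536, default = 3072.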
    FileArchiverNotifierFactory test = new FileArchiverNotifierFactory() {
      Map<TableName, Long> tableToSize =
        ImmutableMap.of(tn1, 1024L, tn2, 1024L, tn3, 512L, tn4, 1024L, tn5, 3072L);

      @Override
      public FileArchiverNotifier get(Connection conn, Configuration conf, FileSystem fs,
        TableName tn) {
        return new FileArchiverNotifier() {
          @Override
          public void addArchivedFiles(Set<Entry<String, Long>> fileSizes) throws IOException {
          }

          @Override
          public long computeAndStoreSnapshotSizes(Collection<String> currentSnapshots)
            throws IOException {
            return tableToSize.get(tn);
          }
        };
      }
    };
    try {
      FileArchiverNotifierFactoryImpl.setInstance(test);

      Multimap<TableName, String> snapshotsToCompute = HashMultimap.create();
      snapshotsToCompute.put(tn1, "");
      snapshotsToCompute.put(tn2, "");
      snapshotsToCompute.put(tn3, "");
      snapshotsToCompute.put(tn4, "");
      snapshotsToCompute.put(tn5, "");
      Map<String, Long> nsSizes = testChore.computeSnapshotSizes(snapshotsToCompute);
      assertEquals(3, nsSizes.size());
      assertEquals(2048L, (long) nsSizes.get("ns1"));
      assertEquals(1536L, (long) nsSizes.get("ns2"));
      assertEquals(3072L, (long) nsSizes.get(NamespaceDescriptor.DEFAULT_NAMESPACE_NAME_STR));
    } finally {
      FileArchiverNotifierFactoryImpl.reset();
    }
  }

  @Test
  public void testRemovedSnapshots() throws Exception {
    // Create a table and set a quota
    TableName tn1 = helper.createTableWithRegions(1);
    admin.setQuota(QuotaSettingsFactory.limitTableSpace(tn1, SpaceQuotaHelperForTests.ONE_GIGABYTE,
      SpaceViolationPolicy.NO_INSERTS));

    // Write some data and flush it
    helper.writeData(tn1, 256L * SpaceQuotaHelperForTests.ONE_KILOBYTE); // 256 KB

    final AtomicReference<Long> lastSeenSize = new AtomicReference<>();
    // Wait for the Master chore to run to see the usage (with a fudge factor)
    TEST_UTIL.waitFor(30_000, new SpaceQuotaSnapshotPredicate(conn, tn1) {
      @Override
      boolean evaluate(SpaceQuotaSnapshot snapshot) throws Exception {
        lastSeenSize.set(snapshot.getUsage());
        return snapshot.getUsage() > 230L * SpaceQuotaHelperForTests.ONE_KILOBYTE;
      }
    });

    // Create a snapshot on the table
    final String snapshotName1 = tn1 + "snapshot1";
    admin.snapshot(new SnapshotDescription(snapshotName1, tn1, SnapshotType.SKIPFLUSH));

    // Snapshot size has to be 0 as the snapshot shares the data with the table
    final Table quotaTable = conn.getTable(QuotaUtil.QUOTA_TABLE_NAME);
    TEST_UTIL.waitFor(30_000, new Predicate<Exception>() {
      @Override
      public boolean evaluate() throws Exception {
        Get g = QuotaTableUtil.makeGetForSnapshotSize(tn1, snapshotName1);
        Result r = quotaTable.get(g);
        if (r == null || r.isEmpty()) {
          return false;
        }
        r.advance();
        Cell c = r.current();
        return QuotaTableUtil.parseSnapshotSize(c) == 0;
      }
    });
    // Total usage has to remain the same as what we saw before taking a snapshot
    TEST_UTIL.waitFor(30_000, new SpaceQuotaSnapshotPredicate(conn, tn1) {
      @Override
      boolean evaluate(SpaceQuotaSnapshot snapshot) throws Exception {
        return snapshot.getUsage() == lastSeenSize.get();
      }
    });

    // Major compact the table to force a rewrite
    TEST_UTIL.compact(tn1, true);
    // Now the snapshot size has to equal the previous total size
    TEST_UTIL.waitFor(30_000, new Predicate<Exception>() {
      @Override
      public boolean evaluate() throws Exception {
        Get g = QuotaTableUtil.makeGetForSnapshotSize(tn1, snapshotName1);
        Result r = quotaTable.get(g);
        if (r == null || r.isEmpty()) {
          return false;
        }
        r.advance();
        Cell c = r.current();
        // The compaction result file has an additional compaction event tracker
        return lastSeenSize.get() == QuotaTableUtil.parseSnapshotSize(c);
      }
    });
    // The total size now has to be at least double the previous total size,
    // since twice the number of store files exist now.
    final AtomicReference<Long> sizeAfterCompaction = new AtomicReference<>();
    TEST_UTIL.waitFor(30_000, new SpaceQuotaSnapshotPredicate(conn, tn1) {
      @Override
      boolean evaluate(SpaceQuotaSnapshot snapshot) throws Exception {
        sizeAfterCompaction.set(snapshot.getUsage());
        return snapshot.getUsage() >= 2 * lastSeenSize.get();
      }
    });

    // Delete the snapshot
    admin.deleteSnapshot(snapshotName1);
    // The total size has to come down to the previous total size minus the removed snapshot size
    TEST_UTIL.waitFor(30_000, new SpaceQuotaSnapshotPredicate(conn, tn1) {
      @Override
      boolean evaluate(SpaceQuotaSnapshot snapshot) throws Exception {
        return snapshot.getUsage() == (sizeAfterCompaction.get() - lastSeenSize.get());
      }
    });
  }

  @Test
  public void testBucketingFilesToSnapshots() throws Exception {
    // Create a table and set a quota
    TableName tn1 = helper.createTableWithRegions(1);
    admin.setQuota(QuotaSettingsFactory.limitTableSpace(tn1, SpaceQuotaHelperForTests.ONE_GIGABYTE,
      SpaceViolationPolicy.NO_INSERTS));

    // Write some data and flush it
    helper.writeData(tn1, 256L * SpaceQuotaHelperForTests.ONE_KILOBYTE);
    admin.flush(tn1);

    final AtomicReference<Long> lastSeenSize = new AtomicReference<>();
    // Wait for the Master chore to run to see the usage (with a fudge factor)
    TEST_UTIL.waitFor(30_000, new SpaceQuotaSnapshotPredicate(conn, tn1) {
      @Override
      boolean evaluate(SpaceQuotaSnapshot snapshot) throws Exception {
        lastSeenSize.set(snapshot.getUsage());
        return snapshot.getUsage() > 230L * SpaceQuotaHelperForTests.ONE_KILOBYTE;
      }
    });

    // Create a snapshot on the table
    final String snapshotName1 = tn1 + "snapshot1";
    admin.snapshot(new SnapshotDescription(snapshotName1, tn1, SnapshotType.SKIPFLUSH));
    // Major compact the table to force a rewrite
    TEST_UTIL.compact(tn1, true);

    // Make sure that the snapshot owns the size
    final Table quotaTable = conn.getTable(QuotaUtil.QUOTA_TABLE_NAME);
    TEST_UTIL.waitFor(30_000, new Predicate<Exception>() {
      @Override
      public boolean evaluate() throws Exception {
        LOG.info("Waiting to see quota snapshot1 size");
        debugFilesForSnapshot(tn1, snapshotName1);
        Get g = QuotaTableUtil.makeGetForSnapshotSize(tn1, snapshotName1);
        Result r = quotaTable.get(g);
        if (r == null || r.isEmpty()) {
          return false;
        }
        r.advance();
        Cell c = r.current();
        // The compaction result file has an additional compaction event tracker
        return lastSeenSize.get() == QuotaTableUtil.parseSnapshotSize(c);
      }
    });

    LOG.info("Snapshotting table again");
    // Create another snapshot on the table
    final String snapshotName2 = tn1 + "snapshot2";
    admin.snapshot(new SnapshotDescription(snapshotName2, tn1, SnapshotType.SKIPFLUSH));
    LOG.info("Compacting table");
    // Major compact the table to force a rewrite
    TEST_UTIL.compact(tn1, true);

    // Make sure that the snapshot owns the size
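    // (snapshot2 should now own roughly the same size snapshot1 did; the rewritten file can
    // differ slightly because of the extra compaction event tracker, hence the closeInSize check)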
    TEST_UTIL.waitFor(30_000, new Predicate<Exception>() {
      @Override
      public boolean evaluate() throws Exception {
        LOG.info("Waiting to see quota snapshot2 size");
        debugFilesForSnapshot(tn1, snapshotName2);
        Get g = QuotaTableUtil.makeGetForSnapshotSize(tn1, snapshotName2);
        Result r = quotaTable.get(g);
        if (r == null || r.isEmpty()) {
          return false;
        }
        r.advance();
        Cell c = r.current();
        return closeInSize(lastSeenSize.get(), QuotaTableUtil.parseSnapshotSize(c),
          SpaceQuotaHelperForTests.ONE_KILOBYTE);
      }
    });

    Get g = QuotaTableUtil.createGetNamespaceSnapshotSize(tn1.getNamespaceAsString());
    Result r = quotaTable.get(g);
    assertNotNull(r);
    assertFalse(r.isEmpty());
    r.advance();
    long size = QuotaTableUtil.parseSnapshotSize(r.current());
    // Two snapshots of equal size.
    assertTrue(closeInSize(lastSeenSize.get() * 2, size, SpaceQuotaHelperForTests.ONE_KILOBYTE));
  }

  /**
   * Prints details about every file referenced by the snapshot with the given name.
   */
  void debugFilesForSnapshot(TableName table, String snapshot) throws IOException {
    final Configuration conf = TEST_UTIL.getConfiguration();
    final FileSystem fs = TEST_UTIL.getTestFileSystem();
    final Path snapshotDir = new Path(conf.get("hbase.rootdir"), HConstants.SNAPSHOT_DIR_NAME);
    SnapshotReferenceUtil.visitReferencedFiles(conf, fs, new Path(snapshotDir, snapshot),
      new SnapshotVisitor() {
        @Override
        public void storeFile(RegionInfo regionInfo, String familyName, StoreFile storeFile)
          throws IOException {
          LOG.info("Snapshot={} references file={}, size={}", snapshot, storeFile.getName(),
            storeFile.getFileSize());
        }
      });
  }

  /**
   * Computes if {@code size2} is within {@code delta} of {@code size1}, inclusive. The size of our
   * store files will change after the first major compaction as the last compaction gets
   * serialized into the store file (see the fields referenced by COMPACTION_EVENT_KEY in
   * HFilePrettyPrinter).
   */
  boolean closeInSize(long size1, long size2, long delta) {
    long lower = size1 - delta;
    long upper = size1 + delta;
    return lower <= size2 && size2 <= upper;
  }
}