/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.regionserver;

import static org.apache.hadoop.hbase.HConstants.BUCKET_CACHE_SIZE_KEY;
import static org.apache.hadoop.hbase.io.hfile.bucket.BucketCache.DEFAULT_ERROR_TOLERATION_DURATION;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertNull;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;

import java.io.IOException;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Optional;
import java.util.Random;
import java.util.Set;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseClassTestRule;
import org.apache.hadoop.hbase.HBaseTestingUtil;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.Waiter;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.RegionInfo;
import org.apache.hadoop.hbase.client.RegionInfoBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.fs.HFileSystem;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
import org.apache.hadoop.hbase.io.hfile.BlockCache;
import org.apache.hadoop.hbase.io.hfile.BlockCacheFactory;
import org.apache.hadoop.hbase.io.hfile.BlockCacheKey;
import org.apache.hadoop.hbase.io.hfile.BlockType;
import org.apache.hadoop.hbase.io.hfile.BlockType.BlockCategory;
import org.apache.hadoop.hbase.io.hfile.CacheConfig;
import org.apache.hadoop.hbase.io.hfile.CacheTestUtils;
import org.apache.hadoop.hbase.io.hfile.HFileBlock;
import org.apache.hadoop.hbase.io.hfile.HFileContextBuilder;
import org.apache.hadoop.hbase.io.hfile.bucket.BucketCache;
import org.apache.hadoop.hbase.regionserver.storefiletracker.StoreFileTracker;
import org.apache.hadoop.hbase.regionserver.storefiletracker.StoreFileTrackerFactory;
import org.apache.hadoop.hbase.testclassification.RegionServerTests;
import org.apache.hadoop.hbase.testclassification.SmallTests;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.CommonFSUtils;
import org.apache.hadoop.hbase.util.Pair;
import org.junit.BeforeClass;
import org.junit.ClassRule;
import org.junit.Test;
import org.junit.experimental.categories.Category;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

/**
 * This class is used to test the functionality of the DataTieringManager.
 *
 * The mock online regions are stored in {@link TestDataTieringManager#testOnlineRegions}.
 * For all tests, the setup of {@link TestDataTieringManager#testOnlineRegions} occurs only once.
 * Please refer to {@link TestDataTieringManager#setupOnlineRegions()} for the structure.
 * Additionally, a list of all store files is maintained in
 * {@link TestDataTieringManager#hStoreFiles}. The characteristics of these store files are listed
 * below:
 * @formatter:off
 * ## HStoreFile Information
 *
 * | HStoreFile  | Region  | Store    | DataTiering | isHot |
 * |-------------|---------|----------|-------------|-------|
 * | hStoreFile0 | region1 | hStore11 | TIME_RANGE  | true  |
 * | hStoreFile1 | region1 | hStore12 | NONE        | true  |
 * | hStoreFile2 | region2 | hStore21 | TIME_RANGE  | true  |
 * | hStoreFile3 | region2 | hStore22 | TIME_RANGE  | false |
 * @formatter:on
 */
@Category({ RegionServerTests.class, SmallTests.class })
public class TestDataTieringManager {

  @ClassRule
  public static final HBaseClassTestRule CLASS_RULE =
    HBaseClassTestRule.forClass(TestDataTieringManager.class);

  private static final Logger LOG = LoggerFactory.getLogger(TestDataTieringManager.class);
  private static final HBaseTestingUtil TEST_UTIL = new HBaseTestingUtil();
  private static final long DAY = 24 * 60 * 60 * 1000;
  private static Configuration defaultConf;
  private static FileSystem fs;
  private static BlockCache blockCache;
  private static CacheConfig cacheConf;
  private static Path testDir;
  private static final Map<String, HRegion> testOnlineRegions = new HashMap<>();

  private static DataTieringManager dataTieringManager;
  private static final List<HStoreFile> hStoreFiles = new ArrayList<>();

  /**
   * Represents the current lexicographically increasing string used as a row key when writing
   * HFiles. It is incremented each time {@link #nextString()} is called to generate unique row
   * keys.
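   * For example, the generated sequence runs "a", "b", ..., "z", "za", "zb", ..., "zz", "zza", ...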
   */
  private static String rowKeyString;

  @BeforeClass
  public static void setupBeforeClass() throws Exception {
    testDir = TEST_UTIL.getDataTestDir(TestDataTieringManager.class.getSimpleName());
    defaultConf = TEST_UTIL.getConfiguration();
    updateCommonConfigurations();
    assertTrue(DataTieringManager.instantiate(defaultConf, testOnlineRegions));
    dataTieringManager = DataTieringManager.getInstance();
    rowKeyString = "";
  }

  private static void updateCommonConfigurations() {
    defaultConf.setBoolean(DataTieringManager.GLOBAL_DATA_TIERING_ENABLED_KEY, true);
    defaultConf.setStrings(HConstants.BUCKET_CACHE_IOENGINE_KEY, "offheap");
    defaultConf.setLong(BUCKET_CACHE_SIZE_KEY, 32);
  }

  @FunctionalInterface
  interface DataTieringMethodCallerWithPath {
    boolean call(DataTieringManager manager, Path path) throws DataTieringException;
  }

  @FunctionalInterface
  interface DataTieringMethodCallerWithKey {
    boolean call(DataTieringManager manager, BlockCacheKey key) throws DataTieringException;
  }

  @Test
  public void testDataTieringEnabledWithKey() throws IOException {
    initializeTestEnvironment();
    DataTieringMethodCallerWithKey methodCallerWithKey = DataTieringManager::isDataTieringEnabled;

    // Test with valid key
    BlockCacheKey key = new BlockCacheKey(hStoreFiles.get(0).getPath(), 0, true, BlockType.DATA);
    testDataTieringMethodWithKeyNoException(methodCallerWithKey, key, true);

    // Test with another valid key
    key = new BlockCacheKey(hStoreFiles.get(1).getPath(), 0, true, BlockType.DATA);
    testDataTieringMethodWithKeyNoException(methodCallerWithKey, key, false);

    // Test with a valid key that carries no HFile path
    key = new BlockCacheKey(hStoreFiles.get(0).getPath().getName(), 0);
    testDataTieringMethodWithKeyExpectingException(methodCallerWithKey, key,
      new DataTieringException("BlockCacheKey Doesn't Contain HFile Path"));
  }

  @Test
  public void testDataTieringEnabledWithPath() throws IOException {
    initializeTestEnvironment();
    DataTieringMethodCallerWithPath methodCallerWithPath = DataTieringManager::isDataTieringEnabled;

    // Test with valid path
    Path hFilePath = hStoreFiles.get(1).getPath();
    testDataTieringMethodWithPathNoException(methodCallerWithPath, hFilePath, false);

    // Test with another valid path
    hFilePath = hStoreFiles.get(3).getPath();
    testDataTieringMethodWithPathNoException(methodCallerWithPath, hFilePath, true);

    // Test with an incorrect path
    hFilePath = new Path("incorrectPath");
    testDataTieringMethodWithPathExpectingException(methodCallerWithPath, hFilePath,
      new DataTieringException("Incorrect HFile Path: " + hFilePath));

    // Test with a non-existing HRegion path
    Path basePath = hStoreFiles.get(0).getPath().getParent().getParent().getParent();
    hFilePath = new Path(basePath, "incorrectRegion/cf1/filename");
    testDataTieringMethodWithPathExpectingException(methodCallerWithPath, hFilePath,
      new DataTieringException("HRegion corresponding to " + hFilePath + " doesn't exist"));

    // Test with a non-existing HStore path
    basePath = hStoreFiles.get(0).getPath().getParent().getParent();
    hFilePath = new Path(basePath, "incorrectCf/filename");
    testDataTieringMethodWithPathExpectingException(methodCallerWithPath, hFilePath,
      new DataTieringException("HStore corresponding to " + hFilePath + " doesn't exist"));
  }
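
  // Note: with TIME_RANGE tiering, a file is considered hot when its maximum cell timestamp
  // falls within the hot-data age configured for its store; see setupOnlineRegions() for the
  // ages and file timestamps behind the expectations in the tests below.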

  @Test
  public void testHotDataWithKey() throws IOException {
    initializeTestEnvironment();
    DataTieringMethodCallerWithKey methodCallerWithKey = DataTieringManager::isHotData;

    // Test with valid key
    BlockCacheKey key = new BlockCacheKey(hStoreFiles.get(0).getPath(), 0, true, BlockType.DATA);
    testDataTieringMethodWithKeyNoException(methodCallerWithKey, key, true);

    // Test with another valid key
    key = new BlockCacheKey(hStoreFiles.get(3).getPath(), 0, true, BlockType.DATA);
    testDataTieringMethodWithKeyNoException(methodCallerWithKey, key, false);
  }

  @Test
  public void testHotDataWithPath() throws IOException {
    initializeTestEnvironment();
    DataTieringMethodCallerWithPath methodCallerWithPath = DataTieringManager::isHotData;

    // Test with valid path
    Path hFilePath = hStoreFiles.get(2).getPath();
    testDataTieringMethodWithPathNoException(methodCallerWithPath, hFilePath, true);

    // Test with another valid path
    hFilePath = hStoreFiles.get(3).getPath();
    testDataTieringMethodWithPathNoException(methodCallerWithPath, hFilePath, false);

    // Test with a filename whose corresponding HStoreFile is not present
    hFilePath = new Path(hStoreFiles.get(0).getPath().getParent(), "incorrectFileName");
    testDataTieringMethodWithPathExpectingException(methodCallerWithPath, hFilePath,
      new DataTieringException("Store file corresponding to " + hFilePath + " doesn't exist"));
  }

  @Test
  public void testPrefetchWhenDataTieringEnabled() throws IOException {
    setPrefetchBlocksOnOpen();
    initializeTestEnvironment();
    // Evict all blocks from the cache by closing the files with evict-on-close, then initialize
    // the readers again. Since prefetch-on-open is set to true, the blocks should be prefetched
    // back into the cache.
    for (HStoreFile file : hStoreFiles) {
      file.closeStoreFile(true);
      file.initReader();
    }

    // Since we have one cold file among the four files, only three should get prefetched.
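    // The bucket cache tracks files whose blocks have all been cached; getFullyCachedFiles()
    // exposes that view keyed by file name (returned as an Optional, since not every cache
    // implementation supports it).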
    Optional<Map<String, Pair<String, Long>>> fullyCachedFiles = blockCache.getFullyCachedFiles();
    assertTrue("We should get the fully cached files from the cache", fullyCachedFiles.isPresent());
    Waiter.waitFor(defaultConf, 10000, () -> fullyCachedFiles.get().size() == 3);
    assertEquals("Number of fully cached files is incorrect", 3, fullyCachedFiles.get().size());
  }

  private void setPrefetchBlocksOnOpen() {
    defaultConf.setBoolean(CacheConfig.PREFETCH_BLOCKS_ON_OPEN_KEY, true);
  }

  @Test
  public void testColdDataFiles() throws IOException {
    initializeTestEnvironment();
    Set<BlockCacheKey> allCachedBlocks = new HashSet<>();
    for (HStoreFile file : hStoreFiles) {
      allCachedBlocks.add(new BlockCacheKey(file.getPath(), 0, true, BlockType.DATA));
    }

    // Verify hStoreFile3 is identified as cold data
    DataTieringMethodCallerWithPath methodCallerWithPath = DataTieringManager::isHotData;
    Path hFilePath = hStoreFiles.get(3).getPath();
    testDataTieringMethodWithPathNoException(methodCallerWithPath, hFilePath, false);

    // Verify all the other files in hStoreFiles are hot data
    for (int i = 0; i < hStoreFiles.size() - 1; i++) {
      hFilePath = hStoreFiles.get(i).getPath();
      testDataTieringMethodWithPathNoException(methodCallerWithPath, hFilePath, true);
    }

    try {
      Set<String> coldFilePaths = dataTieringManager.getColdDataFiles(allCachedBlocks);
      assertEquals(1, coldFilePaths.size());
    } catch (DataTieringException e) {
      fail("Unexpected DataTieringException: " + e.getMessage());
    }
  }

  @Test
  public void testCacheCompactedBlocksOnWriteDataTieringDisabled() throws IOException {
    setCacheCompactBlocksOnWrite();
    initializeTestEnvironment();

    HRegion region = createHRegion("table3");
    testCacheCompactedBlocksOnWrite(region, true);
  }

  @Test
  public void testCacheCompactedBlocksOnWriteWithHotData() throws IOException {
    setCacheCompactBlocksOnWrite();
    initializeTestEnvironment();

    HRegion region = createHRegion("table3", getConfWithTimeRangeDataTieringEnabled(5 * DAY));
    testCacheCompactedBlocksOnWrite(region, true);
  }

  @Test
  public void testCacheCompactedBlocksOnWriteWithColdData() throws IOException {
    setCacheCompactBlocksOnWrite();
    initializeTestEnvironment();

    HRegion region = createHRegion("table3", getConfWithTimeRangeDataTieringEnabled(DAY));
    testCacheCompactedBlocksOnWrite(region, false);
  }

  private void setCacheCompactBlocksOnWrite() {
    defaultConf.setBoolean(CacheConfig.CACHE_COMPACTED_BLOCKS_ON_WRITE_KEY, true);
  }

  private void testCacheCompactedBlocksOnWrite(HRegion region, boolean expectDataBlocksCached)
    throws IOException {
    HStore hStore = createHStore(region, "cf1");
    createTestFilesForCompaction(hStore);
    hStore.refreshStoreFiles();

    region.stores.put(Bytes.toBytes("cf1"), hStore);
    testOnlineRegions.put(region.getRegionInfo().getEncodedName(), region);

    long initialStoreFilesCount = hStore.getStorefilesCount();
    long initialCacheDataBlockCount = blockCache.getDataBlockCount();
    assertEquals(3, initialStoreFilesCount);
    assertEquals(0, initialCacheDataBlockCount);

    region.compact(true);

    long compactedStoreFilesCount = hStore.getStorefilesCount();
    long compactedCacheDataBlockCount = blockCache.getDataBlockCount();
    assertEquals(1, compactedStoreFilesCount);
    assertEquals(expectDataBlocksCached, compactedCacheDataBlockCount > 0);
  }
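
  /**
   * Creates three store files whose max timestamps are 2, 3, and 4 days old, so that the
   * subsequent major compaction rewrites them into a single output file.
   */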
  private void createTestFilesForCompaction(HStore hStore) throws IOException {
    long currentTime = System.currentTimeMillis();
    Path storeDir = hStore.getStoreContext().getFamilyStoreDirectoryPath();
    Configuration configuration = hStore.getReadOnlyConfiguration();

    createHStoreFile(storeDir, configuration, currentTime - 2 * DAY,
      hStore.getHRegion().getRegionFileSystem());
    createHStoreFile(storeDir, configuration, currentTime - 3 * DAY,
      hStore.getHRegion().getRegionFileSystem());
    createHStoreFile(storeDir, configuration, currentTime - 4 * DAY,
      hStore.getHRegion().getRegionFileSystem());
  }

  @Test
  public void testPickColdDataFiles() throws IOException {
    initializeTestEnvironment();
    Map<String, String> coldDataFiles = dataTieringManager.getColdFilesList();
    assertEquals(1, coldDataFiles.size());
    // hStoreFiles[3] is the cold file.
    assertTrue(coldDataFiles.containsKey(hStoreFiles.get(3).getFileInfo().getActiveFileName()));
  }

  /*
   * Verify that both cold blocks are evicted when the bucket reaches its capacity, while the hot
   * block remains in the cache.
   */
  @Test
  public void testBlockEvictions() throws Exception {
    initializeTestEnvironment();
    long capacitySize = 40 * 1024;
    int writeThreads = 3;
    int writerQLen = 64;
    int[] bucketSizes = new int[] { 8 * 1024 + 1024 };

    // Setup: Create a bucket cache with lower capacity
    BucketCache bucketCache =
      new BucketCache("file:" + testDir + "/bucket.cache", capacitySize, 8192, bucketSizes,
        writeThreads, writerQLen, null, DEFAULT_ERROR_TOLERATION_DURATION, defaultConf);

    // Create three cache keys: two for cold data blocks and one for a hot data block.
    // hStoreFiles.get(3) is a cold data file, while hStoreFiles.get(0) is a hot file.
    Set<BlockCacheKey> cacheKeys = new HashSet<>();
    cacheKeys.add(new BlockCacheKey(hStoreFiles.get(3).getPath(), 0, true, BlockType.DATA));
    cacheKeys.add(new BlockCacheKey(hStoreFiles.get(3).getPath(), 8192, true, BlockType.DATA));
    cacheKeys.add(new BlockCacheKey(hStoreFiles.get(0).getPath(), 0, true, BlockType.DATA));

    // Create dummy data to be cached and fill the cache completely.
    CacheTestUtils.HFileBlockPair[] blocks = CacheTestUtils.generateHFileBlocks(8192, 3);

    int blocksIter = 0;
    for (BlockCacheKey key : cacheKeys) {
      bucketCache.cacheBlock(key, blocks[blocksIter++].getBlock());
      // Ensure that the block is persisted to the file.
      Waiter.waitFor(defaultConf, 10000, 100, () -> (bucketCache.getBackingMap().containsKey(key)));
    }

    // Verify that the bucket cache contains 3 blocks.
    assertEquals(3, bucketCache.getBackingMap().keySet().size());

    // Add an additional hot-data block into the cache, which should trigger the eviction.
    BlockCacheKey newKey = new BlockCacheKey(hStoreFiles.get(2).getPath(), 0, true, BlockType.DATA);
    CacheTestUtils.HFileBlockPair[] newBlock = CacheTestUtils.generateHFileBlocks(8192, 1);

    bucketCache.cacheBlock(newKey, newBlock[0].getBlock());
    Waiter.waitFor(defaultConf, 10000, 100,
      () -> (bucketCache.getBackingMap().containsKey(newKey)));

    // Verify that the bucket cache now contains only the 2 hot blocks.
    // Both 8KB cold blocks are evicted to make room for the new 8KB block plus some additional
    // free space.
    validateBlocks(bucketCache.getBackingMap().keySet(), 2, 2, 0);
  }

  /*
   * Verify that two of the three cold blocks are evicted when the bucket reaches its capacity,
   * while one cold block remains in the cache since evicting two already frees the required space.
   */
  @Test
  public void testBlockEvictionsAllColdBlocks() throws Exception {
    initializeTestEnvironment();
    long capacitySize = 40 * 1024;
    int writeThreads = 3;
    int writerQLen = 64;
    int[] bucketSizes = new int[] { 8 * 1024 + 1024 };

    // Setup: Create a bucket cache with lower capacity
    BucketCache bucketCache =
      new BucketCache("file:" + testDir + "/bucket.cache", capacitySize, 8192, bucketSizes,
        writeThreads, writerQLen, null, DEFAULT_ERROR_TOLERATION_DURATION, defaultConf);

    // Create three cache keys, all pointing at cold data blocks.
    // hStoreFiles.get(3) is a cold data file.
    Set<BlockCacheKey> cacheKeys = new HashSet<>();
    cacheKeys.add(new BlockCacheKey(hStoreFiles.get(3).getPath(), 0, true, BlockType.DATA));
    cacheKeys.add(new BlockCacheKey(hStoreFiles.get(3).getPath(), 8192, true, BlockType.DATA));
    cacheKeys.add(new BlockCacheKey(hStoreFiles.get(3).getPath(), 16384, true, BlockType.DATA));

    // Create dummy data to be cached and fill the cache completely.
    CacheTestUtils.HFileBlockPair[] blocks = CacheTestUtils.generateHFileBlocks(8192, 3);

    int blocksIter = 0;
    for (BlockCacheKey key : cacheKeys) {
      bucketCache.cacheBlock(key, blocks[blocksIter++].getBlock());
      // Ensure that the block is persisted to the file.
      Waiter.waitFor(defaultConf, 10000, 100, () -> (bucketCache.getBackingMap().containsKey(key)));
    }

    // Verify that the bucket cache contains 3 blocks.
    assertEquals(3, bucketCache.getBackingMap().keySet().size());

    // Add an additional hot-data block into the cache, which should trigger the eviction.
    BlockCacheKey newKey = new BlockCacheKey(hStoreFiles.get(2).getPath(), 0, true, BlockType.DATA);
    CacheTestUtils.HFileBlockPair[] newBlock = CacheTestUtils.generateHFileBlocks(8192, 1);

    bucketCache.cacheBlock(newKey, newBlock[0].getBlock());
    Waiter.waitFor(defaultConf, 10000, 100,
      () -> (bucketCache.getBackingMap().containsKey(newKey)));

    // Verify that the bucket cache now contains 1 cold block and the newly added hot block.
    validateBlocks(bucketCache.getBackingMap().keySet(), 2, 1, 1);
  }

  /*
   * Verify that a hot block is evicted along with a cold block when the bucket reaches its
   * capacity.
   */
  @Test
  public void testBlockEvictionsHotBlocks() throws Exception {
    initializeTestEnvironment();
    long capacitySize = 40 * 1024;
    int writeThreads = 3;
    int writerQLen = 64;
    int[] bucketSizes = new int[] { 8 * 1024 + 1024 };

    // Setup: Create a bucket cache with lower capacity
    BucketCache bucketCache =
      new BucketCache("file:" + testDir + "/bucket.cache", capacitySize, 8192, bucketSizes,
        writeThreads, writerQLen, null, DEFAULT_ERROR_TOLERATION_DURATION, defaultConf);

    // Create three cache keys: two for hot data blocks and one for a cold data block.
    // hStoreFiles.get(0) is a hot data file and hStoreFiles.get(3) is a cold data file.
    Set<BlockCacheKey> cacheKeys = new HashSet<>();
    cacheKeys.add(new BlockCacheKey(hStoreFiles.get(0).getPath(), 0, true, BlockType.DATA));
    cacheKeys.add(new BlockCacheKey(hStoreFiles.get(0).getPath(), 8192, true, BlockType.DATA));
    cacheKeys.add(new BlockCacheKey(hStoreFiles.get(3).getPath(), 0, true, BlockType.DATA));

    // Create dummy data to be cached and fill the cache completely.
    CacheTestUtils.HFileBlockPair[] blocks = CacheTestUtils.generateHFileBlocks(8192, 3);

    int blocksIter = 0;
    for (BlockCacheKey key : cacheKeys) {
      bucketCache.cacheBlock(key, blocks[blocksIter++].getBlock());
      // Ensure that the block is persisted to the file.
      Waiter.waitFor(defaultConf, 10000, 100, () -> (bucketCache.getBackingMap().containsKey(key)));
    }

    // Verify that the bucket cache contains 3 blocks.
    assertEquals(3, bucketCache.getBackingMap().keySet().size());

    // Add an additional block, which should evict the only cold block along with one hot block.
    BlockCacheKey newKey = new BlockCacheKey(hStoreFiles.get(2).getPath(), 0, true, BlockType.DATA);
    CacheTestUtils.HFileBlockPair[] newBlock = CacheTestUtils.generateHFileBlocks(8192, 1);

    bucketCache.cacheBlock(newKey, newBlock[0].getBlock());
    Waiter.waitFor(defaultConf, 10000, 100,
      () -> (bucketCache.getBackingMap().containsKey(newKey)));

    // Verify that the bucket cache now contains 2 hot blocks.
    // Only one of the older hot blocks is retained; the other is the newly added hot block.
    validateBlocks(bucketCache.getBackingMap().keySet(), 2, 2, 0);
  }

  @Test
  public void testFeatureKeyDisabled() throws Exception {
    DataTieringManager.resetForTestingOnly();
    defaultConf.setBoolean(DataTieringManager.GLOBAL_DATA_TIERING_ENABLED_KEY, false);
    initializeTestEnvironment();

    try {
      assertFalse(DataTieringManager.instantiate(defaultConf, testOnlineRegions));
      // Verify that the DataTieringManager instance is not instantiated by the
      // instantiate call above.
      assertNull(DataTieringManager.getInstance());

      // Also validate that data temperature is not honored.
      long capacitySize = 40 * 1024;
      int writeThreads = 3;
      int writerQLen = 64;
      int[] bucketSizes = new int[] { 8 * 1024 + 1024 };

      // Setup: Create a bucket cache with lower capacity
      BucketCache bucketCache =
        new BucketCache("file:" + testDir + "/bucket.cache", capacitySize, 8192, bucketSizes,
          writeThreads, writerQLen, null, DEFAULT_ERROR_TOLERATION_DURATION, defaultConf);

      // Create three cache keys: two for hot data blocks and one for a cold data block.
      // hStoreFiles.get(0) is a hot data file and hStoreFiles.get(3) is a cold data file.
      List<BlockCacheKey> cacheKeys = new ArrayList<>();
      cacheKeys.add(new BlockCacheKey(hStoreFiles.get(0).getPath(), 0, true, BlockType.DATA));
      cacheKeys.add(new BlockCacheKey(hStoreFiles.get(0).getPath(), 8192, true, BlockType.DATA));
      cacheKeys.add(new BlockCacheKey(hStoreFiles.get(3).getPath(), 0, true, BlockType.DATA));

      // Create dummy data to be cached and fill the cache completely.
      CacheTestUtils.HFileBlockPair[] blocks = CacheTestUtils.generateHFileBlocks(8192, 3);

      int blocksIter = 0;
      for (BlockCacheKey key : cacheKeys) {
        LOG.info("Adding {}", key);
        bucketCache.cacheBlock(key, blocks[blocksIter++].getBlock());
        // Ensure that the block is persisted to the file.
        Waiter.waitFor(defaultConf, 10000, 100,
          () -> (bucketCache.getBackingMap().containsKey(key)));
      }

      // Verify that the bucket cache contains 3 blocks.
      assertEquals(3, bucketCache.getBackingMap().keySet().size());

      // Add an additional hot block, which triggers eviction.
      BlockCacheKey newKey =
        new BlockCacheKey(hStoreFiles.get(2).getPath(), 0, true, BlockType.DATA);
      CacheTestUtils.HFileBlockPair[] newBlock = CacheTestUtils.generateHFileBlocks(8192, 1);

      bucketCache.cacheBlock(newKey, newBlock[0].getBlock());
      Waiter.waitFor(defaultConf, 10000, 100,
        () -> (bucketCache.getBackingMap().containsKey(newKey)));

      // Verify that the bucket still contains the only cold block and the newly added hot block.
      // The older hot blocks are evicted, and the data-tiering mechanism does not kick in to
      // evict the cold block.
      validateBlocks(bucketCache.getBackingMap().keySet(), 2, 1, 1);
    } finally {
      DataTieringManager.resetForTestingOnly();
      defaultConf.setBoolean(DataTieringManager.GLOBAL_DATA_TIERING_ENABLED_KEY, true);
      assertTrue(DataTieringManager.instantiate(defaultConf, testOnlineRegions));
    }
  }

  @Test
  public void testCacheConfigShouldCacheFile() throws Exception {
    initializeTestEnvironment();
    // Evict the files from cache.
    for (HStoreFile file : hStoreFiles) {
      file.closeStoreFile(true);
    }
    // Verify that shouldCacheBlockOnRead returns the expected result.
    // hStoreFiles[0], hStoreFiles[1], hStoreFiles[2] are hot files.
    // hStoreFiles[3] is a cold file.
    try {
      assertTrue(cacheConf.shouldCacheBlockOnRead(BlockCategory.DATA,
        hStoreFiles.get(0).getFileInfo().getHFileInfo(),
        hStoreFiles.get(0).getFileInfo().getConf()));
      assertTrue(cacheConf.shouldCacheBlockOnRead(BlockCategory.DATA,
        hStoreFiles.get(1).getFileInfo().getHFileInfo(),
        hStoreFiles.get(1).getFileInfo().getConf()));
      assertTrue(cacheConf.shouldCacheBlockOnRead(BlockCategory.DATA,
        hStoreFiles.get(2).getFileInfo().getHFileInfo(),
        hStoreFiles.get(2).getFileInfo().getConf()));
      assertFalse(cacheConf.shouldCacheBlockOnRead(BlockCategory.DATA,
        hStoreFiles.get(3).getFileInfo().getHFileInfo(),
        hStoreFiles.get(3).getFileInfo().getConf()));
    } finally {
      for (HStoreFile file : hStoreFiles) {
        file.initReader();
      }
    }
  }

  @Test
  public void testCacheOnReadColdFile() throws Exception {
    initializeTestEnvironment();
    // hStoreFiles[3] is a cold file. The blocks should not get loaded after a readBlock call.
    HStoreFile hStoreFile = hStoreFiles.get(3);
    BlockCacheKey cacheKey = new BlockCacheKey(hStoreFile.getPath(), 0, true, BlockType.DATA);
    testCacheOnRead(hStoreFile, cacheKey, -1, false);
  }

  @Test
  public void testCacheOnReadHotFile() throws Exception {
    initializeTestEnvironment();
    // hStoreFiles[0] is a hot file. The blocks should get loaded after a readBlock call.
    HStoreFile hStoreFile = hStoreFiles.get(0);
    BlockCacheKey cacheKey = new BlockCacheKey(hStoreFile.getPath(), 0, true, BlockType.DATA);
    testCacheOnRead(hStoreFile, cacheKey, -1, true);
  }

  private void testCacheOnRead(HStoreFile hStoreFile, BlockCacheKey key, long onDiskBlockSize,
    boolean expectedCached) throws Exception {
    // Execute the read block API, which will try to cache the block if the block is a hot block.
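    // The cacheBlock flag passed below is true, so caching is requested for every read; whether
    // the block actually lands in the cache is decided by the tiering-aware cache-on-read checks.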
    hStoreFile.getReader().getHFileReader().readBlock(key.getOffset(), onDiskBlockSize, true, false,
      false, false, key.getBlockType(), DataBlockEncoding.NONE);
    // Validate that the hot block gets cached and the cold block is not cached.
    HFileBlock block = (HFileBlock) blockCache.getBlock(key, false, false, false, BlockType.DATA);
    if (expectedCached) {
      assertNotNull(block);
    } else {
      assertNull(block);
    }
  }

  private void validateBlocks(Set<BlockCacheKey> keys, int expectedTotalKeys, int expectedHotBlocks,
    int expectedColdBlocks) {
    int numHotBlocks = 0, numColdBlocks = 0;

    Waiter.waitFor(defaultConf, 10000, 100, () -> (expectedTotalKeys == keys.size()));
    for (BlockCacheKey key : keys) {
      try {
        if (dataTieringManager.isHotData(key)) {
          numHotBlocks++;
        } else {
          numColdBlocks++;
        }
      } catch (Exception e) {
        fail("Unexpected exception: " + e.getMessage());
      }
    }
    assertEquals(expectedHotBlocks, numHotBlocks);
    assertEquals(expectedColdBlocks, numColdBlocks);
  }

  private void testDataTieringMethodWithPath(DataTieringMethodCallerWithPath caller, Path path,
    boolean expectedResult, DataTieringException exception) {
    try {
      boolean value = caller.call(dataTieringManager, path);
      if (exception != null) {
        fail("Expected DataTieringException to be thrown");
      }
      assertEquals(expectedResult, value);
    } catch (DataTieringException e) {
      if (exception == null) {
        fail("Unexpected DataTieringException: " + e.getMessage());
      }
      assertEquals(exception.getMessage(), e.getMessage());
    }
  }

  private void testDataTieringMethodWithKey(DataTieringMethodCallerWithKey caller,
    BlockCacheKey key, boolean expectedResult, DataTieringException exception) {
    try {
      boolean value = caller.call(dataTieringManager, key);
      if (exception != null) {
        fail("Expected DataTieringException to be thrown");
      }
      assertEquals(expectedResult, value);
    } catch (DataTieringException e) {
      if (exception == null) {
        fail("Unexpected DataTieringException: " + e.getMessage());
      }
      assertEquals(exception.getMessage(), e.getMessage());
    }
  }

  private void testDataTieringMethodWithPathExpectingException(
    DataTieringMethodCallerWithPath caller, Path path, DataTieringException exception) {
    testDataTieringMethodWithPath(caller, path, false, exception);
  }

  private void testDataTieringMethodWithPathNoException(DataTieringMethodCallerWithPath caller,
    Path path, boolean expectedResult) {
    testDataTieringMethodWithPath(caller, path, expectedResult, null);
  }

  private void testDataTieringMethodWithKeyExpectingException(DataTieringMethodCallerWithKey caller,
    BlockCacheKey key, DataTieringException exception) {
    testDataTieringMethodWithKey(caller, key, false, exception);
  }

  private void testDataTieringMethodWithKeyNoException(DataTieringMethodCallerWithKey caller,
    BlockCacheKey key, boolean expectedResult) {
    testDataTieringMethodWithKey(caller, key, expectedResult, null);
  }

  private static void initializeTestEnvironment() throws IOException {
    setupFileSystemAndCache();
    setupOnlineRegions();
  }

  private static void setupFileSystemAndCache() throws IOException {
    fs = HFileSystem.get(defaultConf);
    blockCache = BlockCacheFactory.createBlockCache(defaultConf);
    cacheConf = new CacheConfig(defaultConf, blockCache);
  }
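
  /**
   * Builds the mock regions described in the class-level table: region1 holds hStore11 (cf1,
   * TIME_RANGE tiering with a one-day hot age) and hStore12 (cf2, no tiering), while region2
   * enables TIME_RANGE tiering at the table level with a hot age of 2.5 days for both stores.
   */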
  private static void setupOnlineRegions() throws IOException {
    testOnlineRegions.clear();
    hStoreFiles.clear();
    long currentTime = System.currentTimeMillis();

    HRegion region1 = createHRegion("table1");

    HStore hStore11 = createHStore(region1, "cf1", getConfWithTimeRangeDataTieringEnabled(DAY));
    hStoreFiles.add(createHStoreFile(hStore11.getStoreContext().getFamilyStoreDirectoryPath(),
      hStore11.getReadOnlyConfiguration(), currentTime, region1.getRegionFileSystem()));
    hStore11.refreshStoreFiles();
    HStore hStore12 = createHStore(region1, "cf2");
    hStoreFiles.add(createHStoreFile(hStore12.getStoreContext().getFamilyStoreDirectoryPath(),
      hStore12.getReadOnlyConfiguration(), currentTime - DAY, region1.getRegionFileSystem()));
    hStore12.refreshStoreFiles();

    region1.stores.put(Bytes.toBytes("cf1"), hStore11);
    region1.stores.put(Bytes.toBytes("cf2"), hStore12);

    HRegion region2 =
      createHRegion("table2", getConfWithTimeRangeDataTieringEnabled((long) (2.5 * DAY)));

    HStore hStore21 = createHStore(region2, "cf1");
    hStoreFiles.add(createHStoreFile(hStore21.getStoreContext().getFamilyStoreDirectoryPath(),
      hStore21.getReadOnlyConfiguration(), currentTime - 2 * DAY, region2.getRegionFileSystem()));
    hStore21.refreshStoreFiles();
    HStore hStore22 = createHStore(region2, "cf2");
    hStoreFiles.add(createHStoreFile(hStore22.getStoreContext().getFamilyStoreDirectoryPath(),
      hStore22.getReadOnlyConfiguration(), currentTime - 3 * DAY, region2.getRegionFileSystem()));
    hStore22.refreshStoreFiles();

    region2.stores.put(Bytes.toBytes("cf1"), hStore21);
    region2.stores.put(Bytes.toBytes("cf2"), hStore22);

    for (HStoreFile file : hStoreFiles) {
      file.initReader();
    }

    testOnlineRegions.put(region1.getRegionInfo().getEncodedName(), region1);
    testOnlineRegions.put(region2.getRegionInfo().getEncodedName(), region2);
  }

  private static HRegion createHRegion(String table) throws IOException {
    return createHRegion(table, defaultConf);
  }

  private static HRegion createHRegion(String table, Configuration conf) throws IOException {
    TableName tableName = TableName.valueOf(table);

    TableDescriptor htd = TableDescriptorBuilder.newBuilder(tableName)
      .setValue(DataTieringManager.DATATIERING_KEY, conf.get(DataTieringManager.DATATIERING_KEY))
      .setValue(DataTieringManager.DATATIERING_HOT_DATA_AGE_KEY,
        conf.get(DataTieringManager.DATATIERING_HOT_DATA_AGE_KEY))
      .build();
    RegionInfo hri = RegionInfoBuilder.newBuilder(tableName).build();

    Configuration testConf = new Configuration(conf);
    CommonFSUtils.setRootDir(testConf, testDir);
    HRegionFileSystem regionFs = HRegionFileSystem.createRegionOnFileSystem(testConf, fs,
      CommonFSUtils.getTableDir(testDir, hri.getTable()), hri);

    HRegion region = new HRegion(regionFs, null, conf, htd, null);
    // Manually set the BlockCache for the HRegion instance.
    // This is necessary because the region server is not started within this method,
    // and therefore the BlockCache needs to be explicitly configured.
    region.setBlockCache(blockCache);
    return region;
  }

  private static HStore createHStore(HRegion region, String columnFamily) throws IOException {
    return createHStore(region, columnFamily, defaultConf);
  }

  private static HStore createHStore(HRegion region, String columnFamily, Configuration conf)
    throws IOException {
    ColumnFamilyDescriptor columnFamilyDescriptor =
      ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes(columnFamily))
        .setValue(DataTieringManager.DATATIERING_KEY, conf.get(DataTieringManager.DATATIERING_KEY))
        .setValue(DataTieringManager.DATATIERING_HOT_DATA_AGE_KEY,
          conf.get(DataTieringManager.DATATIERING_HOT_DATA_AGE_KEY))
        .build();

    return new HStore(region, columnFamilyDescriptor, conf, false);
  }

  private static Configuration getConfWithTimeRangeDataTieringEnabled(long hotDataAge) {
    Configuration conf = new Configuration(defaultConf);
    conf.set(DataTieringManager.DATATIERING_KEY, DataTieringType.TIME_RANGE.name());
    conf.set(DataTieringManager.DATATIERING_HOT_DATA_AGE_KEY, String.valueOf(hotDataAge));
    return conf;
  }

  static HStoreFile createHStoreFile(Path storeDir, Configuration conf, long timestamp,
    HRegionFileSystem regionFs) throws IOException {
    String columnFamily = storeDir.getName();

    StoreFileWriter storeFileWriter = new StoreFileWriter.Builder(conf, cacheConf, fs)
      .withOutputDir(storeDir).withFileContext(new HFileContextBuilder().build()).build();

    writeStoreFileRandomData(storeFileWriter, Bytes.toBytes(columnFamily), timestamp);

    StoreContext storeContext = StoreContext.getBuilder().withRegionFileSystem(regionFs).build();

    StoreFileTracker sft = StoreFileTrackerFactory.create(conf, true, storeContext);
    return new HStoreFile(fs, storeFileWriter.getPath(), conf, cacheConf, BloomType.NONE, true,
      sft);
  }

  /**
   * Writes random data to a store file with rows arranged in lexicographically increasing order.
   * Each row is generated using the {@link #nextString()} method, ensuring that each subsequent
   * row is lexicographically larger than the previous one.
   */
  private static void writeStoreFileRandomData(final StoreFileWriter writer, byte[] columnFamily,
    long timestamp) throws IOException {
    int cellsPerFile = 10;
    byte[] qualifier = Bytes.toBytes("qualifier");
    byte[] value = generateRandomBytes(4 * 1024);
    try {
      for (int i = 0; i < cellsPerFile; i++) {
        byte[] row = Bytes.toBytes(nextString());
        writer.append(new KeyValue(row, columnFamily, qualifier, timestamp, value));
      }
    } finally {
      writer.appendTrackedTimestampsToMetadata();
      writer.close();
    }
  }

  private static byte[] generateRandomBytes(int sizeInBytes) {
    Random random = new Random();
    byte[] randomBytes = new byte[sizeInBytes];
    random.nextBytes(randomBytes);
    return randomBytes;
  }

  /**
   * Returns the next lexicographically larger string each time it is called.
   */
  private static String nextString() {
    if (rowKeyString == null || rowKeyString.isEmpty()) {
      rowKeyString = "a";
    }
    char lastChar = rowKeyString.charAt(rowKeyString.length() - 1);
    if (lastChar < 'z') {
      rowKeyString = rowKeyString.substring(0, rowKeyString.length() - 1) + (char) (lastChar + 1);
    } else {
      rowKeyString = rowKeyString + "a";
    }
    return rowKeyString;
  }
}