/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.io.hfile.bucket;

import static org.junit.jupiter.api.Assertions.assertFalse;
import static org.junit.jupiter.api.Assertions.assertTrue;

import java.io.File;
import java.io.IOException;
import java.util.Iterator;
import java.util.Map;
import java.util.Random;
import java.util.concurrent.ThreadLocalRandom;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseTestingUtil;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.fs.HFileSystem;
import org.apache.hadoop.hbase.io.hfile.BlockCacheKey;
import org.apache.hadoop.hbase.io.hfile.BlockType;
import org.apache.hadoop.hbase.io.hfile.CacheConfig;
import org.apache.hadoop.hbase.io.hfile.HFile;
import org.apache.hadoop.hbase.io.hfile.HFileBlock;
import org.apache.hadoop.hbase.io.hfile.HFileContext;
import org.apache.hadoop.hbase.io.hfile.HFileContextBuilder;
import org.apache.hadoop.hbase.io.hfile.PrefetchExecutor;
import org.apache.hadoop.hbase.io.hfile.RandomKeyValueUtil;
import org.apache.hadoop.hbase.regionserver.StoreFileWriter;
import org.apache.hadoop.hbase.testclassification.IOTests;
import org.apache.hadoop.hbase.testclassification.MediumTests;
import org.junit.jupiter.api.Tag;
import org.junit.jupiter.api.Test;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

@Tag(IOTests.TAG)
@Tag(MediumTests.TAG)
public class TestBucketCachePersister {

  public int constructedBlockSize = 16 * 1024;

  private static final Logger LOG = LoggerFactory.getLogger(TestBucketCachePersister.class);

  public int[] constructedBlockSizes =
    new int[] { 2 * 1024 + 1024, 4 * 1024 + 1024, 8 * 1024 + 1024, 16 * 1024 + 1024,
      28 * 1024 + 1024, 32 * 1024 + 1024, 64 * 1024 + 1024, 96 * 1024 + 1024, 128 * 1024 + 1024 };

  private static final HBaseTestingUtil TEST_UTIL = new HBaseTestingUtil();

  private static final int NUM_VALID_KEY_TYPES = KeyValue.Type.values().length - 2;
  private static final int DATA_BLOCK_SIZE = 2048;
  private static final int NUM_KV = 1000;

  final long capacitySize = 32 * 1024 * 1024;
  final int writeThreads = BucketCache.DEFAULT_WRITER_THREADS;
  final int writerQLen = BucketCache.DEFAULT_WRITER_QUEUE_ITEMS;
  Path testDir;

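  /**
   * Builds the test configuration: enables prefetch on open, creates the data test
   * directory, and sets the bucket cache persistence interval to the given value.
   */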
  public Configuration setupBucketCacheConfig(long bucketCachePersistInterval) throws IOException {
    Configuration conf = TEST_UTIL.getConfiguration();
    conf.setBoolean(CacheConfig.PREFETCH_BLOCKS_ON_OPEN_KEY, true);
    testDir = TEST_UTIL.getDataTestDir();
    TEST_UTIL.getTestFileSystem().mkdirs(testDir);
    conf.setLong(CacheConfig.BUCKETCACHE_PERSIST_INTERVAL_KEY, bucketCachePersistInterval);
    return conf;
  }

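  /**
   * Creates a file-backed bucket cache of {@code capacitySize} bytes that persists
   * its backing map to "bucket.persistence" under the test directory.
   */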
  public BucketCache setupBucketCache(Configuration conf, String persistentCacheFile)
    throws IOException {
    BucketCache bucketCache = new BucketCache("file:" + testDir + "/" + persistentCacheFile,
      capacitySize, constructedBlockSize, constructedBlockSizes, writeThreads, writerQLen,
      testDir + "/bucket.persistence", 60 * 1000, conf);
    return bucketCache;
  }

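  /** Shuts down the cache and verifies the test directory has been cleaned up. */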
  public void cleanupBucketCache(BucketCache bucketCache) throws IOException {
    bucketCache.shutdown();
    TEST_UTIL.cleanupDataTestDirOnTestFS(String.valueOf(testDir));
    assertFalse(TEST_UTIL.getTestFileSystem().exists(testDir));
  }

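  /**
   * With a short persist interval, the persistence file should exist on disk once
   * the interval has elapsed after the prefetched blocks land in the cache.
   */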
  @Test
  public void testPrefetchPersistenceCrash() throws Exception {
    long bucketCachePersistInterval = 3000;
    Configuration conf = setupBucketCacheConfig(bucketCachePersistInterval);
    BucketCache bucketCache = setupBucketCache(conf, "testPrefetchPersistenceCrash");
    CacheConfig cacheConf = new CacheConfig(conf, bucketCache);
    FileSystem fs = HFileSystem.get(conf);
    // Load Cache
    Path storeFile = writeStoreFile("TestPrefetch0", conf, cacheConf, fs);
    Path storeFile2 = writeStoreFile("TestPrefetch1", conf, cacheConf, fs);
    readStoreFile(storeFile, 0, fs, cacheConf, conf, bucketCache);
    readStoreFile(storeFile2, 0, fs, cacheConf, conf, bucketCache);
    Thread.sleep(bucketCachePersistInterval);
    assertTrue(new File(testDir + "/bucket.persistence").exists());
    assertTrue(new File(testDir + "/bucket.persistence").delete());
    cleanupBucketCache(bucketCache);
  }

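  /**
   * With an effectively infinite persist interval, the persistence file should
   * never be written.
   */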
  @Test
  public void testPrefetchPersistenceCrashNegative() throws Exception {
    long bucketCachePersistInterval = Long.MAX_VALUE;
    Configuration conf = setupBucketCacheConfig(bucketCachePersistInterval);
    BucketCache bucketCache = setupBucketCache(conf, "testPrefetchPersistenceCrashNegative");
    CacheConfig cacheConf = new CacheConfig(conf, bucketCache);
    FileSystem fs = HFileSystem.get(conf);
    // Load Cache
    Path storeFile = writeStoreFile("TestPrefetch2", conf, cacheConf, fs);
    readStoreFile(storeFile, 0, fs, cacheConf, conf, bucketCache);
    assertFalse(new File(testDir + "/bucket.persistence").exists());
    cleanupBucketCache(bucketCache);
  }

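  /**
   * Once a file is fully prefetched, evicting one of its blocks should remove the
   * file from the fullyCachedFiles map.
   */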
  @Test
  public void testPrefetchListUponBlockEviction() throws Exception {
    Configuration conf = setupBucketCacheConfig(200);
    BucketCache bucketCache = setupBucketCache(conf, "testPrefetchListUponBlockEviction");
    CacheConfig cacheConf = new CacheConfig(conf, bucketCache);
    FileSystem fs = HFileSystem.get(conf);
    // Load Blocks in cache
    Path storeFile = writeStoreFile("TestPrefetch3", conf, cacheConf, fs);
    readStoreFile(storeFile, 0, fs, cacheConf, conf, bucketCache);
    int retries = 0;
    while (!bucketCache.fullyCachedFiles.containsKey(storeFile.getName()) && retries < 5) {
      Thread.sleep(500);
      retries++;
    }
    assertTrue(retries < 5);
    BlockCacheKey bucketCacheKey = bucketCache.backingMap.entrySet().iterator().next().getKey();
    // Evict Blocks from cache
    bucketCache.evictBlock(bucketCacheKey);
    assertFalse(bucketCache.fullyCachedFiles.containsKey(storeFile.getName()));
    cleanupBucketCache(bucketCache);
  }

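  /**
   * Evicting a data block while the prefetch for its file is still running should
   * keep the file out of the fullyCachedFiles map once the prefetch finishes.
   */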
  @Test
  public void testPrefetchBlockEvictionWhilePrefetchRunning() throws Exception {
    Configuration conf = setupBucketCacheConfig(200);
    BucketCache bucketCache =
      setupBucketCache(conf, "testPrefetchBlockEvictionWhilePrefetchRunning");
    CacheConfig cacheConf = new CacheConfig(conf, bucketCache);
    FileSystem fs = HFileSystem.get(conf);
    // Load Blocks in cache
    Path storeFile = writeStoreFile("TestPrefetch3", conf, cacheConf, fs);
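    // Opening a reader kicks off an asynchronous prefetch of the file's blocks,
    // since PREFETCH_BLOCKS_ON_OPEN_KEY is enabled in setupBucketCacheConfig.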
    HFile.createReader(fs, storeFile, cacheConf, true, conf);
    boolean evicted = false;
    while (!PrefetchExecutor.isCompleted(storeFile)) {
      LOG.debug("Entered loop as prefetch for {} is still running.", storeFile);
      if (bucketCache.backingMap.size() > 0 && !evicted) {
        Iterator<Map.Entry<BlockCacheKey, BucketEntry>> it =
          bucketCache.backingMap.entrySet().iterator();
        // Evict the first data block found; advance the iterator on every pass so
        // the loop cannot spin forever when the first entry is not a data block.
        while (it.hasNext() && !evicted) {
          Map.Entry<BlockCacheKey, BucketEntry> entry = it.next();
          if (entry.getKey().getBlockType().equals(BlockType.DATA)) {
            evicted = bucketCache.evictBlock(entry.getKey());
            LOG.debug("Attempted eviction for {}. Succeeded? {}", storeFile, evicted);
          }
        }
      }
      Thread.sleep(10);
    }
    assertFalse(bucketCache.fullyCachedFiles.containsKey(storeFile.getName()));
    cleanupBucketCache(bucketCache);
  }

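  /**
   * Opens the given store file, waits for its prefetch to complete, then asserts
   * that the block at the given offset is cached whenever it is a data, root index
   * or intermediate index block.
   */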
  public void readStoreFile(Path storeFilePath, long offset, FileSystem fs, CacheConfig cacheConf,
    Configuration conf, BucketCache bucketCache) throws Exception {
    // Open the file
    HFile.Reader reader = HFile.createReader(fs, storeFilePath, cacheConf, true, conf);

    while (!reader.prefetchComplete()) {
      // Sleep for a bit
      Thread.sleep(1000);
    }
    HFileBlock block = reader.readBlock(offset, -1, false, true, false, true, null, null);
    BlockCacheKey blockCacheKey = new BlockCacheKey(reader.getName(), offset);
    boolean isCached = bucketCache.getBlock(blockCacheKey, true, false, true) != null;

    if (
      block.getBlockType() == BlockType.DATA || block.getBlockType() == BlockType.ROOT_INDEX
        || block.getBlockType() == BlockType.INTERMEDIATE_INDEX
    ) {
      assertTrue(isCached);
    }
  }

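  /**
   * Writes a store file with NUM_KV random KeyValues under the data test directory
   * and returns its path.
   */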
  public Path writeStoreFile(String fname, Configuration conf, CacheConfig cacheConf, FileSystem fs)
    throws IOException {
    Path storeFileParentDir = new Path(TEST_UTIL.getDataTestDir(), fname);
    HFileContext meta = new HFileContextBuilder().withBlockSize(DATA_BLOCK_SIZE).build();
    StoreFileWriter sfw = new StoreFileWriter.Builder(conf, cacheConf, fs)
      .withOutputDir(storeFileParentDir).withFileContext(meta).build();
    Random rand = ThreadLocalRandom.current();
    final int rowLen = 32;
    for (int i = 0; i < NUM_KV; ++i) {
      byte[] k = RandomKeyValueUtil.randomOrderedKey(rand, i);
      byte[] v = RandomKeyValueUtil.randomValue(rand);
      int cfLen = rand.nextInt(k.length - rowLen + 1);
      KeyValue kv = new KeyValue(k, 0, rowLen, k, rowLen, cfLen, k, rowLen + cfLen,
        k.length - rowLen - cfLen, rand.nextLong(), generateKeyType(rand), v, 0, v.length);
      sfw.append(kv);
    }

    sfw.close();
    return sfw.getPath();
  }

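  /**
   * Returns Put for roughly half of the generated keys and a random valid type for
   * the rest; Minimum and Maximum are never produced.
   */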
  public static KeyValue.Type generateKeyType(Random rand) {
    if (rand.nextBoolean()) {
      // Let's make half of KVs puts.
      return KeyValue.Type.Put;
    } else {
      KeyValue.Type keyType = KeyValue.Type.values()[1 + rand.nextInt(NUM_VALID_KEY_TYPES)];
      if (keyType == KeyValue.Type.Minimum || keyType == KeyValue.Type.Maximum) {
        throw new RuntimeException("Generated an invalid key type: " + keyType + ". "
          + "Probably the layout of KeyValue.Type has changed.");
      }
      return keyType;
    }
  }

}