/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.regionserver;

import static org.junit.Assert.assertTrue;

import java.io.IOException;
import java.util.ArrayList;
import java.util.Collection;
import java.util.List;
import java.util.Random;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseClassTestRule;
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.RegionInfo;
import org.apache.hadoop.hbase.client.RegionInfoBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.fs.HFileSystem;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
import org.apache.hadoop.hbase.io.hfile.BlockCache;
import org.apache.hadoop.hbase.io.hfile.BlockCacheFactory;
import org.apache.hadoop.hbase.io.hfile.BlockCacheKey;
import org.apache.hadoop.hbase.io.hfile.BlockType;
import org.apache.hadoop.hbase.io.hfile.CacheConfig;
import org.apache.hadoop.hbase.io.hfile.HFile;
import org.apache.hadoop.hbase.io.hfile.HFileBlock;
import org.apache.hadoop.hbase.io.hfile.HFileScanner;
import org.apache.hadoop.hbase.io.hfile.RandomKeyValueUtil;
import org.apache.hadoop.hbase.testclassification.RegionServerTests;
import org.apache.hadoop.hbase.testclassification.SmallTests;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.CommonFSUtils;
import org.apache.hadoop.hbase.wal.AbstractFSWALProvider;
import org.apache.hadoop.hbase.wal.WALFactory;
import org.junit.After;
import org.junit.Before;
import org.junit.ClassRule;
import org.junit.Rule;
import org.junit.Test;
import org.junit.experimental.categories.Category;
import org.junit.rules.TestName;
import org.junit.runner.RunWith;
import org.junit.runners.Parameterized;
import org.junit.runners.Parameterized.Parameters;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

/**
 * Tests {@link HFile} cache-on-write functionality for data blocks, non-root index blocks, and
 * Bloom filter blocks, as specified by the column family.
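 * <p>
 * Cache-on-write is configured per family through the schema; for example, using the same builder
 * API this test exercises:
 *
 * <pre>
 * ColumnFamilyDescriptor cfd =
 *   ColumnFamilyDescriptorBuilder.newBuilder(family).setCacheDataOnWrite(true).build();
 * </pre>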
 */
@RunWith(Parameterized.class)
@Category({ RegionServerTests.class, SmallTests.class })
public class TestCacheOnWriteInSchema {

  @ClassRule
  public static final HBaseClassTestRule CLASS_RULE =
    HBaseClassTestRule.forClass(TestCacheOnWriteInSchema.class);

  private static final Logger LOG = LoggerFactory.getLogger(TestCacheOnWriteInSchema.class);
  @Rule
  public TestName name = new TestName();

  private static final HBaseTestingUtility TEST_UTIL = HBaseTestingUtility.createLocalHTU();
  private static final String DIR = TEST_UTIL.getDataTestDir("TestCacheOnWriteInSchema").toString();
  private static byte[] table;
  private static byte[] family = Bytes.toBytes("family");
  private static final int NUM_KV = 25000;
  private static final Random rand = new Random(12983177L);
  /** The number of valid key types possible in a store file */
  private static final int NUM_VALID_KEY_TYPES = KeyValue.Type.values().length - 2;

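  /**
   * Maps each cache-on-write schema setting to the block type(s) that the setting should cause to
   * be cached at write time.
   */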
  private enum CacheOnWriteType {
    DATA_BLOCKS(BlockType.DATA, BlockType.ENCODED_DATA),
    BLOOM_BLOCKS(BlockType.BLOOM_CHUNK),
    INDEX_BLOCKS(BlockType.LEAF_INDEX, BlockType.INTERMEDIATE_INDEX);

    private final BlockType blockType1;
    private final BlockType blockType2;

    CacheOnWriteType(BlockType blockType) {
      this(blockType, blockType);
    }

    CacheOnWriteType(BlockType blockType1, BlockType blockType2) {
      this.blockType1 = blockType1;
      this.blockType2 = blockType2;
    }

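    /** Returns whether a block of the given type should have been cached at write time. */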
    public boolean shouldBeCached(BlockType blockType) {
      return blockType == blockType1 || blockType == blockType2;
    }

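    /** Enables the cache-on-write setting under test on the given column family builder. */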
    public ColumnFamilyDescriptorBuilder modifyFamilySchema(ColumnFamilyDescriptorBuilder builder) {
      switch (this) {
        case DATA_BLOCKS:
          builder.setCacheDataOnWrite(true);
          break;
        case BLOOM_BLOCKS:
          builder.setCacheBloomsOnWrite(true);
          break;
        case INDEX_BLOCKS:
          builder.setCacheIndexesOnWrite(true);
          break;
      }
      return builder;
    }
  }

  private final CacheOnWriteType cowType;
  private Configuration conf;
  private final String testDescription;
  private HRegion region;
  private HStore store;
  private WALFactory walFactory;
  private FileSystem fs;

  public TestCacheOnWriteInSchema(CacheOnWriteType cowType) {
    this.cowType = cowType;
    testDescription = "[cacheOnWrite=" + cowType + "]";
    LOG.info(testDescription);
  }

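  /** Runs the whole test once for each {@link CacheOnWriteType}. */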
  @Parameters
  public static Collection<Object[]> getParameters() {
    List<Object[]> cowTypes = new ArrayList<>();
    for (CacheOnWriteType cowType : CacheOnWriteType.values()) {
      cowTypes.add(new Object[] { cowType });
    }
    return cowTypes;
  }

  @Before
  public void setUp() throws IOException {
    // Parameterized tests append a bracketed suffix to the method name; replace the brackets,
    // which are not valid in table names, with underscores.
    table = Bytes.toBytes(name.getMethodName().replaceAll("[\\[\\]]", "_"));

    conf = TEST_UTIL.getConfiguration();
    conf.setInt(HFile.FORMAT_VERSION_KEY, HFile.MAX_FORMAT_VERSION);
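    // Disable the global cache-on-write flags so that only the column family schema settings
    // under test can trigger caching at write time.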
    conf.setBoolean(CacheConfig.CACHE_BLOCKS_ON_WRITE_KEY, false);
    conf.setBoolean(CacheConfig.CACHE_INDEX_BLOCKS_ON_WRITE_KEY, false);
    conf.setBoolean(CacheConfig.CACHE_BLOOM_BLOCKS_ON_WRITE_KEY, false);
    fs = HFileSystem.get(conf);

    // Create the schema
    ColumnFamilyDescriptor hcd = cowType
      .modifyFamilySchema(
        ColumnFamilyDescriptorBuilder.newBuilder(family).setBloomFilterType(BloomType.ROWCOL))
      .build();
    TableDescriptor htd =
      TableDescriptorBuilder.newBuilder(TableName.valueOf(table)).setColumnFamily(hcd).build();

    // Create a store based on the schema
    String id = TestCacheOnWriteInSchema.class.getName();
    Path logdir =
      new Path(CommonFSUtils.getRootDir(conf), AbstractFSWALProvider.getWALDirectoryName(id));
    fs.delete(logdir, true);

    RegionInfo info = RegionInfoBuilder.newBuilder(htd.getTableName()).build();
    walFactory = new WALFactory(conf, id);

    region = TEST_UTIL.createLocalHRegion(info, conf, htd, walFactory.getWAL(info));
    region.setBlockCache(BlockCacheFactory.createBlockCache(conf));
    store = new HStore(region, hcd, conf, false);
  }

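  /** Closes the region and the WAL and deletes the test directory, rethrowing the last failure. */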
  @After
  public void tearDown() throws IOException {
    IOException ex = null;
    try {
      region.close();
    } catch (IOException e) {
      LOG.warn("Caught Exception", e);
      ex = e;
    }
    try {
      walFactory.close();
    } catch (IOException e) {
      LOG.warn("Caught Exception", e);
      ex = e;
    }
    try {
      fs.delete(new Path(DIR), true);
    } catch (IOException e) {
      LOG.error("Could not delete " + DIR, e);
      ex = e;
    }
    if (ex != null) {
      throw ex;
    }
  }

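  /**
   * Writes a store file through the schema-configured store, then verifies that the block types
   * selected by the cache-on-write setting were cached during the write.
   */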
  @Test
  public void testCacheOnWriteInSchema() throws IOException {
    // Write some random data into the store
    StoreFileWriter writer = store.getStoreEngine()
      .createWriter(CreateStoreFileWriterParams.create().maxKeyCount(Integer.MAX_VALUE)
        .compression(HFile.DEFAULT_COMPRESSION_ALGORITHM).isCompaction(false)
        .includeMVCCReadpoint(true).includesTag(false).shouldDropBehind(false));
    writeStoreFile(writer);
    writer.close();
    // Verify the block types of interest were cached on write
    readStoreFile(writer.getPath());
  }

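  /**
   * Walks every block of the store file at {@code path} and asserts that each block whose type is
   * covered by the active cache-on-write setting is present in the block cache.
   */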
  private void readStoreFile(Path path) throws IOException {
    CacheConfig cacheConf = store.getCacheConfig();
    BlockCache cache = cacheConf.getBlockCache().get();
    HStoreFile sf = new HStoreFile(fs, path, conf, cacheConf, BloomType.ROWCOL, true);
    sf.initReader();
    HFile.Reader reader = sf.getReader().getHFileReader();
    try {
      // Open a scanner with (on read) caching disabled
      HFileScanner scanner = reader.getScanner(conf, false, false);
      assertTrue(testDescription, scanner.seekTo());
      // Cribbed from io.hfile.TestCacheOnWrite
      long offset = 0;
      while (offset < reader.getTrailer().getLoadOnOpenDataOffset()) {
        // Flags: don't cache the block, use pread, this is not a compaction.
        // Also, pass null for expected block type to avoid checking it.
        HFileBlock block =
          reader.readBlock(offset, -1, false, true, false, true, null, DataBlockEncoding.NONE);
        BlockCacheKey blockCacheKey = new BlockCacheKey(reader.getName(), offset);
        final BlockType blockType = block.getBlockType();
        boolean isCached = cache.getBlock(blockCacheKey, true, false, true) != null;
        boolean shouldBeCached = cowType.shouldBeCached(blockType);

        if (
          shouldBeCached != isCached
            && (cowType.blockType1.equals(blockType) || cowType.blockType2.equals(blockType))
        ) {
          throw new AssertionError("shouldBeCached: " + shouldBeCached + "\n" + "isCached: "
            + isCached + "\n" + "Test description: " + testDescription + "\n" + "block: " + block
            + "\n" + "blockCacheKey: " + blockCacheKey);
        }
        offset += block.getOnDiskSizeWithHeader();
      }
    } finally {
      reader.close();
    }
  }

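  /** Returns a random KeyValue type; roughly half of the returned types are Puts. */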
  private static KeyValue.Type generateKeyType(Random rand) {
    if (rand.nextBoolean()) {
      // Let's make half of KVs puts.
      return KeyValue.Type.Put;
    } else {
      KeyValue.Type keyType = KeyValue.Type.values()[1 + rand.nextInt(NUM_VALID_KEY_TYPES)];
      if (keyType == KeyValue.Type.Minimum || keyType == KeyValue.Type.Maximum) {
        throw new RuntimeException("Generated an invalid key type: " + keyType + ". "
          + "Probably the layout of KeyValue.Type has changed.");
      }
      return keyType;
    }
  }

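  /** Fills the store file with {@link #NUM_KV} randomly ordered KeyValues of random key types. */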
  private void writeStoreFile(StoreFileWriter writer) throws IOException {
    final int rowLen = 32;
    for (int i = 0; i < NUM_KV; ++i) {
      byte[] k = RandomKeyValueUtil.randomOrderedKey(rand, i);
      byte[] v = RandomKeyValueUtil.randomValue(rand);
      int cfLen = rand.nextInt(k.length - rowLen + 1);
      KeyValue kv = new KeyValue(k, 0, rowLen, k, rowLen, cfLen, k, rowLen + cfLen,
        k.length - rowLen - cfLen, rand.nextLong(), generateKeyType(rand), v, 0, v.length);
      writer.append(kv);
    }
  }

}