/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.regionserver;

import static org.junit.Assert.assertTrue;

import java.io.IOException;
import java.util.ArrayList;
import java.util.Collection;
import java.util.List;
import java.util.Random;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseClassTestRule;
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.RegionInfo;
import org.apache.hadoop.hbase.client.RegionInfoBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.fs.HFileSystem;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
import org.apache.hadoop.hbase.io.hfile.BlockCache;
import org.apache.hadoop.hbase.io.hfile.BlockCacheFactory;
import org.apache.hadoop.hbase.io.hfile.BlockCacheKey;
import org.apache.hadoop.hbase.io.hfile.BlockType;
import org.apache.hadoop.hbase.io.hfile.CacheConfig;
import org.apache.hadoop.hbase.io.hfile.HFile;
import org.apache.hadoop.hbase.io.hfile.HFileBlock;
import org.apache.hadoop.hbase.io.hfile.HFileScanner;
import org.apache.hadoop.hbase.io.hfile.RandomKeyValueUtil;
import org.apache.hadoop.hbase.testclassification.RegionServerTests;
import org.apache.hadoop.hbase.testclassification.SmallTests;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.CommonFSUtils;
import org.apache.hadoop.hbase.wal.AbstractFSWALProvider;
import org.junit.After;
import org.junit.Before;
import org.junit.ClassRule;
import org.junit.Rule;
import org.junit.Test;
import org.junit.experimental.categories.Category;
import org.junit.rules.TestName;
import org.junit.runner.RunWith;
import org.junit.runners.Parameterized;
import org.junit.runners.Parameterized.Parameters;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

/**
 * Tests {@link HFile} cache-on-write functionality for data blocks, non-root index blocks, and
 * Bloom filter blocks, as specified by the column family.
 */
@RunWith(Parameterized.class)
@Category({ RegionServerTests.class, SmallTests.class })
public class TestCacheOnWriteInSchema {

  @ClassRule
  public static final HBaseClassTestRule CLASS_RULE =
    HBaseClassTestRule.forClass(TestCacheOnWriteInSchema.class);

  private static final Logger LOG = LoggerFactory.getLogger(TestCacheOnWriteInSchema.class);
  @Rule
  public TestName name = new TestName();

  private static final HBaseTestingUtility TEST_UTIL = HBaseTestingUtility.createLocalHTU();
  private static final String DIR = TEST_UTIL.getDataTestDir("TestCacheOnWriteInSchema").toString();
  private static byte[] table;
  private static byte[] family = Bytes.toBytes("family");
  private static final int NUM_KV = 25000;
  private static final Random rand = new Random(12983177L);
  /** The number of valid key types possible in a store file */
  private static final int NUM_VALID_KEY_TYPES = KeyValue.Type.values().length - 2;

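  /**
   * The cache-on-write settings under test. Each value enables exactly one cache-on-write option
   * in the column family schema and records the block types that option is expected to cache.
   */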
  private enum CacheOnWriteType {
    DATA_BLOCKS(BlockType.DATA, BlockType.ENCODED_DATA),
    BLOOM_BLOCKS(BlockType.BLOOM_CHUNK),
    INDEX_BLOCKS(BlockType.LEAF_INDEX, BlockType.INTERMEDIATE_INDEX);

    private final BlockType blockType1;
    private final BlockType blockType2;

    private CacheOnWriteType(BlockType blockType) {
      this(blockType, blockType);
    }

    private CacheOnWriteType(BlockType blockType1, BlockType blockType2) {
      this.blockType1 = blockType1;
      this.blockType2 = blockType2;
    }

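    /** Returns whether a block of the given type should have been cached on write. */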
    public boolean shouldBeCached(BlockType blockType) {
      return blockType == blockType1 || blockType == blockType2;
    }

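    /** Enables the matching cache-on-write option on the given column family builder. */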
    public ColumnFamilyDescriptorBuilder modifyFamilySchema(ColumnFamilyDescriptorBuilder builder) {
      switch (this) {
        case DATA_BLOCKS:
          builder.setCacheDataOnWrite(true);
          break;
        case BLOOM_BLOCKS:
          builder.setCacheBloomsOnWrite(true);
          break;
        case INDEX_BLOCKS:
          builder.setCacheIndexesOnWrite(true);
          break;
      }
      return builder;
    }
  }

  private final CacheOnWriteType cowType;
  private Configuration conf;
  private final String testDescription;
  private HRegion region;
  private HStore store;
  private FileSystem fs;

  public TestCacheOnWriteInSchema(CacheOnWriteType cowType) {
    this.cowType = cowType;
    testDescription = "[cacheOnWrite=" + cowType + "]";
    LOG.info(testDescription);
  }

  @Parameters
  public static Collection<Object[]> getParameters() {
    List<Object[]> cowTypes = new ArrayList<>();
    for (CacheOnWriteType cowType : CacheOnWriteType.values()) {
      cowTypes.add(new Object[] { cowType });
    }
    return cowTypes;
  }

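  /**
   * Builds a table with a single column family configured by the current cache-on-write type,
   * disables the global cache-on-write flags so that only the schema settings apply, and creates
   * a region and store backed by that schema.
   */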
  @Before
  public void setUp() throws IOException {
    // Parameterized tests append a "[...]" suffix to the method name; replace the brackets so
    // the result is a valid table name.
    table = Bytes.toBytes(name.getMethodName().replaceAll("[\\[\\]]", "_"));

    conf = TEST_UTIL.getConfiguration();
    conf.setInt(HFile.FORMAT_VERSION_KEY, HFile.MAX_FORMAT_VERSION);
    conf.setBoolean(CacheConfig.CACHE_BLOCKS_ON_WRITE_KEY, false);
    conf.setBoolean(CacheConfig.CACHE_INDEX_BLOCKS_ON_WRITE_KEY, false);
    conf.setBoolean(CacheConfig.CACHE_BLOOM_BLOCKS_ON_WRITE_KEY, false);
    fs = HFileSystem.get(conf);

    // Create the schema
    ColumnFamilyDescriptor hcd = cowType
      .modifyFamilySchema(
        ColumnFamilyDescriptorBuilder.newBuilder(family).setBloomFilterType(BloomType.ROWCOL))
      .build();
    TableDescriptor htd =
      TableDescriptorBuilder.newBuilder(TableName.valueOf(table)).setColumnFamily(hcd).build();

    // Create a store based on the schema
    String id = TestCacheOnWriteInSchema.class.getName();
    Path logdir =
      new Path(CommonFSUtils.getRootDir(conf), AbstractFSWALProvider.getWALDirectoryName(id));
    fs.delete(logdir, true);

    RegionInfo info = RegionInfoBuilder.newBuilder(htd.getTableName()).build();

    region = HBaseTestingUtility.createRegionAndWAL(info, logdir, conf, htd,
      BlockCacheFactory.createBlockCache(conf));
    store = region.getStore(hcd.getName());
  }

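  /**
   * Closes the region and its WAL and deletes the test directory, rethrowing the last cleanup
   * failure, if any, so a broken teardown still fails the test.
   */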
  @After
  public void tearDown() throws IOException {
    IOException ex = null;
    try {
      HBaseTestingUtility.closeRegionAndWAL(region);
    } catch (IOException e) {
      LOG.warn("Caught Exception", e);
      ex = e;
    }
    try {
      fs.delete(new Path(DIR), true);
    } catch (IOException e) {
      LOG.error("Could not delete " + DIR, e);
      ex = e;
    }
    if (ex != null) {
      throw ex;
    }
  }

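  /**
   * Writes a store file through the store engine, then re-reads it to verify that every block
   * type selected by the column family schema was cached on write.
   */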
  @Test
  public void testCacheOnWriteInSchema() throws IOException {
    // Write some random data into the store
    StoreFileWriter writer = store.getStoreEngine()
      .createWriter(CreateStoreFileWriterParams.create().maxKeyCount(Integer.MAX_VALUE)
        .compression(HFile.DEFAULT_COMPRESSION_ALGORITHM).isCompaction(false)
        .includeMVCCReadpoint(true).includesTag(false).shouldDropBehind(false));
    writeStoreFile(writer);
    writer.close();
    // Verify the block types of interest were cached on write
    readStoreFile(writer.getPath());
  }

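  /**
   * Scans every block of the file up to the load-on-open section and asserts that each block of a
   * type covered by the current cache-on-write setting is present in the block cache.
   */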
  private void readStoreFile(Path path) throws IOException {
    CacheConfig cacheConf = store.getCacheConfig();
    BlockCache cache = cacheConf.getBlockCache().get();
    HStoreFile sf = new HStoreFile(fs, path, conf, cacheConf, BloomType.ROWCOL, true);
    sf.initReader();
    HFile.Reader reader = sf.getReader().getHFileReader();
    try {
      // Open a scanner with (on read) caching disabled
      HFileScanner scanner = reader.getScanner(conf, false, false);
      assertTrue(testDescription, scanner.seekTo());
      // Cribbed from io.hfile.TestCacheOnWrite
      long offset = 0;
      while (offset < reader.getTrailer().getLoadOnOpenDataOffset()) {
        // Flags: don't cache the block, use pread, this is not a compaction.
        // Also, pass null for expected block type to avoid checking it.
        HFileBlock block =
          reader.readBlock(offset, -1, false, true, false, true, null, DataBlockEncoding.NONE);
        BlockCacheKey blockCacheKey = new BlockCacheKey(reader.getName(), offset);
        boolean isCached = cache.getBlock(blockCacheKey, true, false, true) != null;
        boolean shouldBeCached = cowType.shouldBeCached(block.getBlockType());
        final BlockType blockType = block.getBlockType();

        // Only fail on blocks of the types this cache-on-write setting covers; cached blocks of
        // other types are tolerated.
        if (
          shouldBeCached != isCached
            && (cowType.blockType1.equals(blockType) || cowType.blockType2.equals(blockType))
        ) {
          throw new AssertionError("shouldBeCached: " + shouldBeCached + "\n" + "isCached: "
            + isCached + "\n" + "Test description: " + testDescription + "\n" + "block: " + block
            + "\n" + "blockCacheKey: " + blockCacheKey);
        }
        offset += block.getOnDiskSizeWithHeader();
      }
    } finally {
      reader.close();
    }
  }

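  /**
   * Returns a random valid key type: Put half of the time, otherwise any type other than the
   * Minimum and Maximum sentinels.
   */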
  private static KeyValue.Type generateKeyType(Random rand) {
    if (rand.nextBoolean()) {
      // Let's make half of KVs puts.
      return KeyValue.Type.Put;
    } else {
      KeyValue.Type keyType = KeyValue.Type.values()[1 + rand.nextInt(NUM_VALID_KEY_TYPES)];
      if (keyType == KeyValue.Type.Minimum || keyType == KeyValue.Type.Maximum) {
        throw new RuntimeException("Generated an invalid key type: " + keyType + ". "
          + "Probably the layout of KeyValue.Type has changed.");
      }
      return keyType;
    }
  }

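  /**
   * Appends NUM_KV random KeyValues to the writer, carving each random ordered key into a
   * fixed-length row, a random-length family, and a remainder qualifier.
   */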
  private void writeStoreFile(StoreFileWriter writer) throws IOException {
    final int rowLen = 32;
    for (int i = 0; i < NUM_KV; ++i) {
      byte[] k = RandomKeyValueUtil.randomOrderedKey(rand, i);
      byte[] v = RandomKeyValueUtil.randomValue(rand);
      int cfLen = rand.nextInt(k.length - rowLen + 1);
      KeyValue kv = new KeyValue(k, 0, rowLen, k, rowLen, cfLen, k, rowLen + cfLen,
        k.length - rowLen - cfLen, rand.nextLong(), generateKeyType(rand), v, 0, v.length);
      writer.append(kv);
    }
  }

}