/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.regionserver;

import static org.junit.Assert.assertTrue;

import java.io.IOException;
import java.util.ArrayList;
import java.util.Collection;
import java.util.List;
import java.util.Random;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseClassTestRule;
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.RegionInfo;
import org.apache.hadoop.hbase.client.RegionInfoBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.fs.HFileSystem;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
import org.apache.hadoop.hbase.io.hfile.BlockCache;
import org.apache.hadoop.hbase.io.hfile.BlockCacheFactory;
import org.apache.hadoop.hbase.io.hfile.BlockCacheKey;
import org.apache.hadoop.hbase.io.hfile.BlockType;
import org.apache.hadoop.hbase.io.hfile.CacheConfig;
import org.apache.hadoop.hbase.io.hfile.HFile;
import org.apache.hadoop.hbase.io.hfile.HFileBlock;
import org.apache.hadoop.hbase.io.hfile.HFileScanner;
import org.apache.hadoop.hbase.io.hfile.RandomKeyValueUtil;
import org.apache.hadoop.hbase.testclassification.MediumTests;
import org.apache.hadoop.hbase.testclassification.RegionServerTests;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.FSUtils;
import org.apache.hadoop.hbase.wal.AbstractFSWALProvider;
import org.apache.hadoop.hbase.wal.WALFactory;
import org.junit.After;
import org.junit.Before;
import org.junit.ClassRule;
import org.junit.Rule;
import org.junit.Test;
import org.junit.experimental.categories.Category;
import org.junit.rules.TestName;
import org.junit.runner.RunWith;
import org.junit.runners.Parameterized;
import org.junit.runners.Parameterized.Parameters;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

/**
 * Tests {@link HFile} cache-on-write functionality for data blocks, non-root
 * index blocks, and Bloom filter blocks, as specified by the column family.
 */
@RunWith(Parameterized.class)
@Category({RegionServerTests.class, MediumTests.class})
public class TestCacheOnWriteInSchema {

  @ClassRule
  public static final HBaseClassTestRule CLASS_RULE =
      HBaseClassTestRule.forClass(TestCacheOnWriteInSchema.class);

  private static final Logger LOG = LoggerFactory.getLogger(TestCacheOnWriteInSchema.class);
  @Rule public TestName name = new TestName();

  private static final HBaseTestingUtility TEST_UTIL = HBaseTestingUtility.createLocalHTU();
  private static final String DIR = TEST_UTIL.getDataTestDir("TestCacheOnWriteInSchema").toString();
  private static byte[] family = Bytes.toBytes("family");
  private static final int NUM_KV = 25000;
  // Fixed seed so every run writes the same sequence of keys/values.
  private static final Random rand = new Random(12983177L);
  /** The number of valid key types possible in a store file */
  private static final int NUM_VALID_KEY_TYPES =
      KeyValue.Type.values().length - 2;

  /**
   * The schema-level cache-on-write flag under test. Each constant enables
   * exactly one flag on the column family and declares which block types
   * that flag should cause to be cached.
   */
  private enum CacheOnWriteType {
    DATA_BLOCKS(BlockType.DATA, BlockType.ENCODED_DATA),
    BLOOM_BLOCKS(BlockType.BLOOM_CHUNK),
    INDEX_BLOCKS(BlockType.LEAF_INDEX, BlockType.INTERMEDIATE_INDEX);

    private final BlockType blockType1;
    private final BlockType blockType2;

    CacheOnWriteType(BlockType blockType) {
      this(blockType, blockType);
    }

    CacheOnWriteType(BlockType blockType1, BlockType blockType2) {
      this.blockType1 = blockType1;
      this.blockType2 = blockType2;
    }

    /** Returns true if blocks of the given type are expected in the cache. */
    public boolean shouldBeCached(BlockType blockType) {
      return blockType == blockType1 || blockType == blockType2;
    }

    /** Enables the single cache-on-write flag this constant represents. */
    public ColumnFamilyDescriptorBuilder modifyFamilySchema(ColumnFamilyDescriptorBuilder builder) {
      switch (this) {
        case DATA_BLOCKS:
          builder.setCacheDataOnWrite(true);
          break;
        case BLOOM_BLOCKS:
          builder.setCacheBloomsOnWrite(true);
          break;
        case INDEX_BLOCKS:
          builder.setCacheIndexesOnWrite(true);
          break;
      }
      return builder;
    }
  }

  private final CacheOnWriteType cowType;
  private Configuration conf;
  private final String testDescription;
  // Per-test table name, derived from the method name in setUp().
  private byte[] table;
  private HRegion region;
  private HStore store;
  private WALFactory walFactory;
  private FileSystem fs;

  public TestCacheOnWriteInSchema(CacheOnWriteType cowType) {
    this.cowType = cowType;
    testDescription = "[cacheOnWrite=" + cowType + "]";
    // Use the class logger rather than stdout so output obeys log config.
    LOG.info(testDescription);
  }

  /** One parameter set per {@link CacheOnWriteType} constant. */
  @Parameters
  public static Collection<Object[]> getParameters() {
    List<Object[]> cowTypes = new ArrayList<>();
    for (CacheOnWriteType cowType : CacheOnWriteType.values()) {
      cowTypes.add(new Object[] { cowType });
    }
    return cowTypes;
  }

  /**
   * Builds a region and store whose column family enables only the
   * cache-on-write flag under test; the global (CacheConfig) flags are all
   * explicitly disabled so caching can only come from the schema.
   */
  @Before
  public void setUp() throws IOException {
    // parameterized tests add [#] suffix get rid of [ and ].
    table = Bytes.toBytes(name.getMethodName().replaceAll("[\\[\\]]", "_"));

    conf = TEST_UTIL.getConfiguration();
    conf.setInt(HFile.FORMAT_VERSION_KEY, HFile.MAX_FORMAT_VERSION);
    conf.setBoolean(CacheConfig.CACHE_BLOCKS_ON_WRITE_KEY, false);
    conf.setBoolean(CacheConfig.CACHE_INDEX_BLOCKS_ON_WRITE_KEY, false);
    conf.setBoolean(CacheConfig.CACHE_BLOOM_BLOCKS_ON_WRITE_KEY, false);
    fs = HFileSystem.get(conf);

    // Create the schema
    ColumnFamilyDescriptor hcd = cowType
        .modifyFamilySchema(
          ColumnFamilyDescriptorBuilder.newBuilder(family).setBloomFilterType(BloomType.ROWCOL))
        .build();
    TableDescriptor htd =
        TableDescriptorBuilder.newBuilder(TableName.valueOf(table)).setColumnFamily(hcd).build();

    // Create a store based on the schema
    String id = TestCacheOnWriteInSchema.class.getName();
    Path logdir = new Path(FSUtils.getRootDir(conf), AbstractFSWALProvider.getWALDirectoryName(id));
    fs.delete(logdir, true);

    RegionInfo info = RegionInfoBuilder.newBuilder(htd.getTableName()).build();
    walFactory = new WALFactory(conf, id);

    region = TEST_UTIL.createLocalHRegion(info, htd, walFactory.getWAL(info));
    region.setBlockCache(BlockCacheFactory.createBlockCache(conf));
    store = new HStore(region, hcd, conf, false);
  }

  /**
   * Closes the region and WAL factory and removes the test directory. Each
   * cleanup step is attempted even if an earlier one fails; the first
   * failure is rethrown with any later ones attached as suppressed
   * exceptions, so no failure is silently lost.
   */
  @After
  public void tearDown() throws IOException {
    IOException ex = null;
    try {
      region.close();
    } catch (IOException e) {
      LOG.warn("Caught Exception", e);
      ex = e;
    }
    try {
      walFactory.close();
    } catch (IOException e) {
      LOG.warn("Caught Exception", e);
      if (ex == null) {
        ex = e;
      } else {
        ex.addSuppressed(e);
      }
    }
    try {
      fs.delete(new Path(DIR), true);
    } catch (IOException e) {
      LOG.error("Could not delete " + DIR, e);
      if (ex == null) {
        ex = e;
      } else {
        ex.addSuppressed(e);
      }
    }
    if (ex != null) {
      throw ex;
    }
  }

  @Test
  public void testCacheOnWriteInSchema() throws IOException {
    // Write some random data into the store
    StoreFileWriter writer = store.createWriterInTmp(Integer.MAX_VALUE,
        HFile.DEFAULT_COMPRESSION_ALGORITHM, false, true, false, false);
    writeStoreFile(writer);
    writer.close();
    // Verify the block types of interest were cached on write
    readStoreFile(writer.getPath());
  }

  /**
   * Walks every block of the freshly written store file and asserts that a
   * block is present in the cache if and only if its type is one the
   * schema flag under test should have cached on write.
   */
  private void readStoreFile(Path path) throws IOException {
    CacheConfig cacheConf = store.getCacheConfig();
    BlockCache cache = cacheConf.getBlockCache().get();
    HStoreFile sf = new HStoreFile(fs, path, conf, cacheConf, BloomType.ROWCOL, true);
    sf.initReader();
    HFile.Reader reader = sf.getReader().getHFileReader();
    try {
      // Open a scanner with (on read) caching disabled
      HFileScanner scanner = reader.getScanner(false, false);
      assertTrue(testDescription, scanner.seekTo());
      // Cribbed from io.hfile.TestCacheOnWrite
      long offset = 0;
      while (offset < reader.getTrailer().getLoadOnOpenDataOffset()) {
        // Flags: don't cache the block, use pread, this is not a compaction.
        // Also, pass null for expected block type to avoid checking it.
        HFileBlock block = reader.readBlock(offset, -1, false, true,
            false, true, null, DataBlockEncoding.NONE);
        BlockCacheKey blockCacheKey = new BlockCacheKey(reader.getName(),
            offset);
        boolean isCached = cache.getBlock(blockCacheKey, true, false, true) != null;
        boolean shouldBeCached = cowType.shouldBeCached(block.getBlockType());
        if (shouldBeCached != isCached) {
          throw new AssertionError(
              "shouldBeCached: " + shouldBeCached + "\n" +
              "isCached: " + isCached + "\n" +
              "Test description: " + testDescription + "\n" +
              "block: " + block + "\n" +
              "blockCacheKey: " + blockCacheKey);
        }
        offset += block.getOnDiskSizeWithHeader();
      }
    } finally {
      reader.close();
    }
  }

  /**
   * Picks a key type for a generated KeyValue: Put half the time, otherwise
   * a uniformly random valid type (Minimum/Maximum are excluded by the
   * index range; hitting one means KeyValue.Type's layout changed).
   */
  private static KeyValue.Type generateKeyType(Random rand) {
    if (rand.nextBoolean()) {
      // Let's make half of KVs puts.
      return KeyValue.Type.Put;
    } else {
      KeyValue.Type keyType =
          KeyValue.Type.values()[1 + rand.nextInt(NUM_VALID_KEY_TYPES)];
      if (keyType == KeyValue.Type.Minimum || keyType == KeyValue.Type.Maximum) {
        throw new RuntimeException("Generated an invalid key type: " + keyType
            + ". " + "Probably the layout of KeyValue.Type has changed.");
      }
      return keyType;
    }
  }

  /**
   * Appends NUM_KV pseudo-random KeyValues to the writer. Each ordered key
   * is split into row (fixed 32 bytes), a random-length family slice, and
   * the remainder as qualifier.
   */
  private void writeStoreFile(StoreFileWriter writer) throws IOException {
    final int rowLen = 32;
    for (int i = 0; i < NUM_KV; ++i) {
      byte[] k = RandomKeyValueUtil.randomOrderedKey(rand, i);
      byte[] v = RandomKeyValueUtil.randomValue(rand);
      int cfLen = rand.nextInt(k.length - rowLen + 1);
      KeyValue kv = new KeyValue(
          k, 0, rowLen,
          k, rowLen, cfLen,
          k, rowLen + cfLen, k.length - rowLen - cfLen,
          rand.nextLong(),
          generateKeyType(rand),
          v, 0, v.length);
      writer.append(kv);
    }
  }

}