/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.regionserver;

import static org.apache.hadoop.hbase.HBaseTestCase.addContent;
import static org.junit.Assert.assertEquals;

import java.util.ArrayList;
import java.util.List;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.HBaseClassTestRule;
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.KeyValueUtil;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.RegionInfo;
import org.apache.hadoop.hbase.client.RegionInfoBuilder;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.io.compress.Compression;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
import org.apache.hadoop.hbase.io.hfile.BlockCache;
import org.apache.hadoop.hbase.io.hfile.BlockCacheFactory;
import org.apache.hadoop.hbase.io.hfile.CacheStats;
import org.apache.hadoop.hbase.testclassification.RegionServerTests;
import org.apache.hadoop.hbase.testclassification.SmallTests;
import org.apache.hadoop.hbase.util.Bytes;
import org.junit.Before;
import org.junit.ClassRule;
import org.junit.Test;
import org.junit.experimental.categories.Category;

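/**
 * Verifies how many HFile blocks a scan touches by comparing the block
 * cache's hit-plus-miss count before and after the scan.
 */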
@SuppressWarnings("deprecation")
@Category({RegionServerTests.class, SmallTests.class})
public class TestBlocksScanned {

  @ClassRule
  public static final HBaseClassTestRule CLASS_RULE =
      HBaseClassTestRule.forClass(TestBlocksScanned.class);

  private static byte[] FAMILY = Bytes.toBytes("family");
  private static byte[] COL = Bytes.toBytes("col");
  private static byte[] START_KEY = Bytes.toBytes("aaa");
  private static byte[] END_KEY = Bytes.toBytes("zzz");
  // Deliberately tiny block size so the flushed store file spans many blocks.
  private static int BLOCK_SIZE = 70;

  private static HBaseTestingUtility TEST_UTIL = null;
  private Configuration conf;
  private Path testDir;

  @Before
  public void setUp() throws Exception {
    TEST_UTIL = new HBaseTestingUtility();
    conf = TEST_UTIL.getConfiguration();
    testDir = TEST_UTIL.getDataTestDir("TestBlocksScanned");
  }

  @Test
  public void testBlocksScanned() throws Exception {
    byte[] tableName = Bytes.toBytes("TestBlocksScanned");
    HTableDescriptor table = new HTableDescriptor(TableName.valueOf(tableName));

    // No compression and a fixed block size keep the block counts deterministic.
    table.addFamily(
        new HColumnDescriptor(FAMILY)
        .setMaxVersions(10)
        .setBlockCacheEnabled(true)
        .setBlocksize(BLOCK_SIZE)
        .setCompressionType(Compression.Algorithm.NONE)
        );
    _testBlocksScanned(table);
  }

  @Test
  public void testBlocksScannedWithEncoding() throws Exception {
    byte[] tableName = Bytes.toBytes("TestBlocksScannedWithEncoding");
    HTableDescriptor table = new HTableDescriptor(TableName.valueOf(tableName));

    // Same layout as above, but with FAST_DIFF data block encoding enabled.
    table.addFamily(
        new HColumnDescriptor(FAMILY)
        .setMaxVersions(10)
        .setBlockCacheEnabled(true)
        .setDataBlockEncoding(DataBlockEncoding.FAST_DIFF)
        .setBlocksize(BLOCK_SIZE)
        .setCompressionType(Compression.Algorithm.NONE)
        );
    _testBlocksScanned(table);
  }

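  /**
   * Creates a region backed by a fresh block cache, loads and flushes one
   * store file, then asserts on the number of blocks the scan reads.
   */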
  private void _testBlocksScanned(TableDescriptor td) throws Exception {
    BlockCache blockCache = BlockCacheFactory.createBlockCache(conf);
    RegionInfo regionInfo =
        RegionInfoBuilder.newBuilder(td.getTableName()).setStartKey(START_KEY).setEndKey(END_KEY)
            .build();
    HRegion r = HBaseTestingUtility.createRegionAndWAL(regionInfo, testDir, conf, td, blockCache);
    addContent(r, FAMILY, COL);
    // Flush so the scan below reads HFile blocks rather than the memstore.
    r.flush(true);

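    // Record hits+misses before the scan so the assertion at the end measures
    // only the blocks this scan touches.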
    CacheStats stats = blockCache.getStats();
    long before = stats.getHitCount() + stats.getMissCount();
    // Scan a single column across rows aaa..aay (the stop row is exclusive).
    Scan scan = new Scan().withStartRow(Bytes.toBytes("aaa")).withStopRow(Bytes.toBytes("aaz"))
        .setReadType(Scan.ReadType.PREAD);
    scan.addColumn(FAMILY, COL);
    scan.setMaxVersions(1);

    InternalScanner s = r.getScanner(scan);
    List<Cell> results = new ArrayList<>();
    // Drain the scanner; next() returns false once the last row is exhausted.
    while (s.next(results)) {
      continue;
    }
    s.close();

    // Expect 25 rows back: aaa through aay.
    int expectResultSize = 'z' - 'a';
    assertEquals(expectResultSize, results.size());

    // Estimate how many KeyValues land in each 70-byte block; expect two.
    int kvPerBlock = (int) Math.ceil(BLOCK_SIZE /
        (double) KeyValueUtil.ensureKeyValue(results.get(0)).getLength());
    assertEquals(2, kvPerBlock);

    long expectDataBlockRead = (long) Math.ceil(expectResultSize / (double) kvPerBlock);
    // The test expects one index block lookup per data block read under pread.
    long expectIndexBlockRead = expectDataBlockRead;

    // Cache lookups, whether hits or misses, each count as one block read.
    assertEquals(expectIndexBlockRead + expectDataBlockRead,
        stats.getHitCount() + stats.getMissCount() - before);
  }
}