/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.regionserver;

import static org.junit.Assert.assertEquals;

import java.io.IOException;
import java.util.ArrayList;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.CacheEvictionStats;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.HBaseClassTestRule;
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.MiniHBaseCluster;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.AsyncAdmin;
import org.apache.hadoop.hbase.client.AsyncConnection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.io.hfile.BlockCache;
import org.apache.hadoop.hbase.testclassification.MediumTests;
import org.apache.hadoop.hbase.util.Bytes;
import org.junit.After;
import org.junit.Before;
import org.junit.ClassRule;
import org.junit.Test;
import org.junit.experimental.categories.Category;
import org.junit.runner.RunWith;
import org.junit.runners.Parameterized;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

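/**
 * Verifies that blocks cached by scanning a small table can be evicted again, both through the
 * region server's clearRegionBlockCache call and through the {@link Admin} and {@link AsyncAdmin}
 * clearBlockCache APIs, for both the "lru" and "bucket" block cache implementations.
 */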
@Category(MediumTests.class)
@RunWith(Parameterized.class)
public class TestClearRegionBlockCache {

  @ClassRule
  public static final HBaseClassTestRule CLASS_RULE =
      HBaseClassTestRule.forClass(TestClearRegionBlockCache.class);

  private static final Logger LOG = LoggerFactory.getLogger(TestClearRegionBlockCache.class);
  private static final TableName TABLE_NAME = TableName.valueOf("testClearRegionBlockCache");
  private static final byte[] FAMILY = Bytes.toBytes("family");
  private static final byte[][] SPLIT_KEY = new byte[][] { Bytes.toBytes("5") };
  private static final int NUM_RS = 2;

  private final HBaseTestingUtility HTU = new HBaseTestingUtility();

  private Configuration CONF = HTU.getConfiguration();
  private Table table;
  private HRegionServer rs1, rs2;
  private MiniHBaseCluster cluster;

  @Parameterized.Parameter public String cacheType;

  @Parameterized.Parameters(name = "{index}: {0}")
  public static Object[] data() {
    return new Object[] { "lru", "bucket" };
  }

  @Before
  public void setup() throws Exception {
    if (cacheType.equals("bucket")) {
      // Run the "bucket" variant against an off-heap bucket cache
      CONF.set(HConstants.BUCKET_CACHE_IOENGINE_KEY, "offheap");
      CONF.setInt(HConstants.BUCKET_CACHE_SIZE_KEY, 30);
    }

    cluster = HTU.startMiniCluster(NUM_RS);
    rs1 = cluster.getRegionServer(0);
    rs2 = cluster.getRegionServer(1);

    // Create a table pre-split into two regions
    table = HTU.createTable(TABLE_NAME, FAMILY, SPLIT_KEY);

    // Load a few rows and flush so each region has an HFile to read blocks from
    HTU.loadNumericRows(table, FAMILY, 1, 10);
    HTU.flush(TABLE_NAME);
  }

  @After
  public void teardown() throws Exception {
    HTU.shutdownMiniCluster();
  }

  @Test
  public void testClearBlockCache() throws Exception {
    BlockCache blockCache1 = rs1.getBlockCache().get();
    BlockCache blockCache2 = rs2.getBlockCache().get();

    long initialBlockCount1 = blockCache1.getBlockCount();
    long initialBlockCount2 = blockCache2.getBlockCount();

    // scan will cause blocks to be added to the BlockCache
    scanAllRegionsForRS(rs1);
    assertEquals(blockCache1.getBlockCount() - initialBlockCount1,
      HTU.getNumHFilesForRS(rs1, TABLE_NAME, FAMILY));
    clearRegionBlockCache(rs1);

    scanAllRegionsForRS(rs2);
    assertEquals(blockCache2.getBlockCount() - initialBlockCount2,
      HTU.getNumHFilesForRS(rs2, TABLE_NAME, FAMILY));
    clearRegionBlockCache(rs2);

    // After clearing, both caches are back to their initial block counts
    assertEquals(initialBlockCount1, blockCache1.getBlockCount());
    assertEquals(initialBlockCount2, blockCache2.getBlockCount());
  }

  @Test
  public void testClearBlockCacheFromAdmin() throws Exception {
    Admin admin = HTU.getAdmin();

    BlockCache blockCache1 = rs1.getBlockCache().get();
    BlockCache blockCache2 = rs2.getBlockCache().get();
    long initialBlockCount1 = blockCache1.getBlockCount();
    long initialBlockCount2 = blockCache2.getBlockCount();

    // scan will cause blocks to be added to the BlockCache
    scanAllRegionsForRS(rs1);
    assertEquals(blockCache1.getBlockCount() - initialBlockCount1,
        HTU.getNumHFilesForRS(rs1, TABLE_NAME, FAMILY));
    scanAllRegionsForRS(rs2);
    assertEquals(blockCache2.getBlockCount() - initialBlockCount2,
        HTU.getNumHFilesForRS(rs2, TABLE_NAME, FAMILY));

    // Evicting via the Admin API should remove exactly the blocks the scans added
    CacheEvictionStats stats = admin.clearBlockCache(TABLE_NAME);
    assertEquals(stats.getEvictedBlocks(), HTU.getNumHFilesForRS(rs1, TABLE_NAME, FAMILY)
        + HTU.getNumHFilesForRS(rs2, TABLE_NAME, FAMILY));
    assertEquals(initialBlockCount1, blockCache1.getBlockCount());
    assertEquals(initialBlockCount2, blockCache2.getBlockCount());
  }

  @Test
  public void testClearBlockCacheFromAsyncAdmin() throws Exception {
    // Use try-with-resources so the AsyncConnection is closed when the test finishes
    try (AsyncConnection conn =
        ConnectionFactory.createAsyncConnection(HTU.getConfiguration()).get()) {
      AsyncAdmin admin = conn.getAdmin();

      BlockCache blockCache1 = rs1.getBlockCache().get();
      BlockCache blockCache2 = rs2.getBlockCache().get();
      long initialBlockCount1 = blockCache1.getBlockCount();
      long initialBlockCount2 = blockCache2.getBlockCount();

      // scan will cause blocks to be added to the BlockCache
      scanAllRegionsForRS(rs1);
      assertEquals(blockCache1.getBlockCount() - initialBlockCount1,
          HTU.getNumHFilesForRS(rs1, TABLE_NAME, FAMILY));
      scanAllRegionsForRS(rs2);
      assertEquals(blockCache2.getBlockCount() - initialBlockCount2,
          HTU.getNumHFilesForRS(rs2, TABLE_NAME, FAMILY));

      // Evicting via the AsyncAdmin API should remove exactly the blocks the scans added
      CacheEvictionStats stats = admin.clearBlockCache(TABLE_NAME).get();
      assertEquals(stats.getEvictedBlocks(), HTU.getNumHFilesForRS(rs1, TABLE_NAME, FAMILY)
          + HTU.getNumHFilesForRS(rs2, TABLE_NAME, FAMILY));
      assertEquals(initialBlockCount1, blockCache1.getBlockCount());
      assertEquals(initialBlockCount2, blockCache2.getBlockCount());
    }
  }

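  /**
   * Scan every region of the test table hosted on the given region server so that the blocks
   * backing those regions are loaded into its block cache.
   */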
  private void scanAllRegionsForRS(HRegionServer rs) throws IOException {
    for (Region region : rs.getRegions(TABLE_NAME)) {
      RegionScanner scanner = region.getScanner(new Scan());
      while (scanner.next(new ArrayList<Cell>())) {
        // Keep draining the scanner; the cells themselves are not needed.
      }
      scanner.close();
    }
  }

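  /** Evict all cached blocks for every region of the test table hosted on the given server. */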
  private void clearRegionBlockCache(HRegionServer rs) {
    for (Region region : rs.getRegions(TABLE_NAME)) {
      rs.clearRegionBlockCache(region);
    }
  }
}