/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.regionserver;

import static org.junit.Assert.assertEquals;

import java.io.IOException;
import java.util.ArrayList;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.CacheEvictionStats;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.HBaseClassTestRule;
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.MiniHBaseCluster;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.AsyncAdmin;
import org.apache.hadoop.hbase.client.AsyncConnection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.io.hfile.BlockCache;
import org.apache.hadoop.hbase.testclassification.LargeTests;
import org.apache.hadoop.hbase.util.Bytes;
import org.junit.After;
import org.junit.Before;
import org.junit.ClassRule;
import org.junit.Test;
import org.junit.experimental.categories.Category;
import org.junit.runner.RunWith;
import org.junit.runners.Parameterized;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

/**
 * Verifies that a region server's block cache can be cleared per region, both by calling the
 * region server directly and through the synchronous and asynchronous Admin clearBlockCache APIs,
 * for both the "lru" and "bucket" block cache implementations.
 */
@Category(LargeTests.class)
@RunWith(Parameterized.class)
public class TestClearRegionBlockCache {

  @ClassRule
  public static final HBaseClassTestRule CLASS_RULE =
    HBaseClassTestRule.forClass(TestClearRegionBlockCache.class);

  private static final Logger LOG = LoggerFactory.getLogger(TestClearRegionBlockCache.class);
  private static final TableName TABLE_NAME = TableName.valueOf("testClearRegionBlockCache");
  private static final byte[] FAMILY = Bytes.toBytes("family");
  private static final byte[][] SPLIT_KEY = new byte[][] { Bytes.toBytes("5") };
  private static final int NUM_RS = 2;

  private final HBaseTestingUtility HTU = new HBaseTestingUtility();

  private Configuration CONF = HTU.getConfiguration();
  private Table table;
  private HRegionServer rs1, rs2;
  private MiniHBaseCluster cluster;

  @Parameterized.Parameter
  public String cacheType;

  @Parameterized.Parameters(name = "{index}: {0}")
  public static Object[] data() {
    return new Object[] { "lru", "bucket" };
  }

  @Before
  public void setup() throws Exception {
    if (cacheType.equals("bucket")) {
      CONF.set(HConstants.BUCKET_CACHE_IOENGINE_KEY, "offheap");
      CONF.setInt(HConstants.BUCKET_CACHE_SIZE_KEY, 30);
    }

    cluster = HTU.startMiniCluster(NUM_RS);
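    // keep a handle to each of the NUM_RS region servers so the tests can assert on their
    // block caches independently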
    rs1 = cluster.getRegionServer(0);
    rs2 = cluster.getRegionServer(1);

    // Create table
    table = HTU.createTable(TABLE_NAME, FAMILY, SPLIT_KEY);

    HTU.loadNumericRows(table, FAMILY, 1, 10);
    HTU.flush(TABLE_NAME);
  }

  @After
  public void teardown() throws Exception {
    HTU.shutdownMiniCluster();
  }

  @Test
  public void testClearBlockCache() throws Exception {
    BlockCache blockCache1 = rs1.getBlockCache().get();
    BlockCache blockCache2 = rs2.getBlockCache().get();

    long initialBlockCount1 = blockCache1.getBlockCount();
    long initialBlockCount2 = blockCache2.getBlockCount();

    // scan will cause blocks to be added in BlockCache
    scanAllRegionsForRS(rs1);
    assertEquals(blockCache1.getBlockCount() - initialBlockCount1,
      HTU.getNumHFilesForRS(rs1, TABLE_NAME, FAMILY));
    clearRegionBlockCache(rs1);

    scanAllRegionsForRS(rs2);
    assertEquals(blockCache2.getBlockCount() - initialBlockCount2,
      HTU.getNumHFilesForRS(rs2, TABLE_NAME, FAMILY));
    clearRegionBlockCache(rs2);

    assertEquals("" + blockCache1.getBlockCount(), initialBlockCount1, blockCache1.getBlockCount());
    assertEquals("" + blockCache2.getBlockCount(), initialBlockCount2, blockCache2.getBlockCount());
  }

  @Test
  public void testClearBlockCacheFromAdmin() throws Exception {
    Admin admin = HTU.getAdmin();

    BlockCache blockCache1 = rs1.getBlockCache().get();
    BlockCache blockCache2 = rs2.getBlockCache().get();
    long initialBlockCount1 = blockCache1.getBlockCount();
    long initialBlockCount2 = blockCache2.getBlockCount();

    // scan will cause blocks to be added in BlockCache
    scanAllRegionsForRS(rs1);
    assertEquals(blockCache1.getBlockCount() - initialBlockCount1,
      HTU.getNumHFilesForRS(rs1, TABLE_NAME, FAMILY));
    scanAllRegionsForRS(rs2);
    assertEquals(blockCache2.getBlockCount() - initialBlockCount2,
      HTU.getNumHFilesForRS(rs2, TABLE_NAME, FAMILY));

    CacheEvictionStats stats = admin.clearBlockCache(TABLE_NAME);
    assertEquals(stats.getEvictedBlocks(), HTU.getNumHFilesForRS(rs1, TABLE_NAME, FAMILY)
      + HTU.getNumHFilesForRS(rs2, TABLE_NAME, FAMILY));
    assertEquals(initialBlockCount1, blockCache1.getBlockCount());
    assertEquals(initialBlockCount2, blockCache2.getBlockCount());
  }

  @Test
  public void testClearBlockCacheFromAsyncAdmin() throws Exception {
    try (AsyncConnection conn =
      ConnectionFactory.createAsyncConnection(HTU.getConfiguration()).get()) {
      AsyncAdmin admin = conn.getAdmin();

      BlockCache blockCache1 = rs1.getBlockCache().get();
      BlockCache blockCache2 = rs2.getBlockCache().get();
      long initialBlockCount1 = blockCache1.getBlockCount();
      long initialBlockCount2 = blockCache2.getBlockCount();

      // scan will cause blocks to be added in BlockCache
      scanAllRegionsForRS(rs1);
      assertEquals(blockCache1.getBlockCount() - initialBlockCount1,
        HTU.getNumHFilesForRS(rs1, TABLE_NAME, FAMILY));
      scanAllRegionsForRS(rs2);
      assertEquals(blockCache2.getBlockCount() - initialBlockCount2,
        HTU.getNumHFilesForRS(rs2, TABLE_NAME, FAMILY));

      CacheEvictionStats stats = admin.clearBlockCache(TABLE_NAME).get();
      assertEquals(stats.getEvictedBlocks(), HTU.getNumHFilesForRS(rs1, TABLE_NAME, FAMILY)
        + HTU.getNumHFilesForRS(rs2, TABLE_NAME, FAMILY));
      assertEquals(initialBlockCount1, blockCache1.getBlockCount());
      assertEquals(initialBlockCount2, blockCache2.getBlockCount());
    }
  }
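
  /**
   * Scans every region of TABLE_NAME hosted on the given region server so that the scanned
   * blocks are loaded into that server's block cache.
   */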
  private void scanAllRegionsForRS(HRegionServer rs) throws IOException {
    for (Region region : rs.getRegions(TABLE_NAME)) {
      RegionScanner scanner = region.getScanner(new Scan());
      while (scanner.next(new ArrayList<Cell>()))
        ;
    }
  }

  private void clearRegionBlockCache(HRegionServer rs) {
    for (Region region : rs.getRegions(TABLE_NAME)) {
      rs.clearRegionBlockCache(region);
    }
  }
}