/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase;

import static org.apache.hadoop.hbase.HConstants.BUCKET_CACHE_IOENGINE_KEY;
import static org.apache.hadoop.hbase.HConstants.BUCKET_CACHE_SIZE_KEY;
import static org.apache.hadoop.hbase.io.hfile.CacheConfig.CACHE_BLOCKS_ON_WRITE_KEY;
import static org.apache.hadoop.hbase.io.hfile.CacheConfig.EVICT_BLOCKS_ON_CLOSE_KEY;
import static org.apache.hadoop.hbase.io.hfile.CacheConfig.EVICT_BLOCKS_ON_SPLIT_KEY;
import static org.apache.hadoop.hbase.io.hfile.CacheConfig.PREFETCH_BLOCKS_ON_OPEN_KEY;
import static org.junit.Assert.assertTrue;

import java.io.IOException;
import java.util.ArrayList;
import java.util.Collection;
import java.util.List;
import java.util.Map;
import java.util.function.BiConsumer;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.regionserver.HStoreFile;
import org.apache.hadoop.hbase.regionserver.storefiletracker.StoreFileTrackerFactory;
import org.apache.hadoop.hbase.testclassification.MediumTests;
import org.apache.hadoop.hbase.testclassification.MiscTests;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.Pair;
import org.junit.Before;
import org.junit.BeforeClass;
import org.junit.ClassRule;
import org.junit.Test;
import org.junit.experimental.categories.Category;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

/**
 * Tests that store file blocks cached on write and prefetched into the bucket cache are evicted
 * (or retained) on region split and region close according to the evict-on-split and
 * evict-on-close settings, and that region metrics only report cached data once a flush has
 * produced store files.
 */
@Category({ MiscTests.class, MediumTests.class })
public class TestCacheEviction {

  @ClassRule
  public static final HBaseClassTestRule CLASS_RULE =
    HBaseClassTestRule.forClass(TestCacheEviction.class);

  private static final Logger LOG = LoggerFactory.getLogger(TestCacheEviction.class);

  private static final HBaseTestingUtil UTIL = new HBaseTestingUtil();

  @BeforeClass
  public static void setUp() throws Exception {
    // Enable cache-on-write and prefetch-on-open, size the bucket cache and use the FILE based
    // store file tracker so that flushed files are fully cached and easy to track.
    UTIL.getConfiguration().setInt(HConstants.HBASE_CLIENT_META_OPERATION_TIMEOUT, 1000);
    UTIL.getConfiguration().setInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER, 2);
    UTIL.getConfiguration().setBoolean(CACHE_BLOCKS_ON_WRITE_KEY, true);
    UTIL.getConfiguration().setBoolean(PREFETCH_BLOCKS_ON_OPEN_KEY, true);
    UTIL.getConfiguration().setInt(BUCKET_CACHE_SIZE_KEY, 200);
    UTIL.getConfiguration().set(StoreFileTrackerFactory.TRACKER_IMPL, "FILE");
  }

  @Before
  public void testSetup() {
    // Back the bucket cache with a file under the test data directory for each test.
    UTIL.getConfiguration().set(BUCKET_CACHE_IOENGINE_KEY,
      "file:" + UTIL.getDataTestDir() + "/bucketcache");
  }

  /** Parent store files should no longer be fully cached after a split when eviction is on. */
  @Test
  public void testEvictOnSplit() throws Exception {
    doTestEvictOnSplit("testEvictOnSplit", true,
      (f, m) -> Waiter.waitFor(UTIL.getConfiguration(), 1000, () -> m.get(f) != null),
      (f, m) -> Waiter.waitFor(UTIL.getConfiguration(), 1000, () -> m.get(f) == null));
  }

  /** Parent store files should remain fully cached after a split when eviction is off. */
  @Test
  public void testDoesntEvictOnSplit() throws Exception {
    doTestEvictOnSplit("testDoesntEvictOnSplit", false,
      (f, m) -> Waiter.waitFor(UTIL.getConfiguration(), 1000, () -> m.get(f) != null),
      (f, m) -> Waiter.waitFor(UTIL.getConfiguration(), 1000, () -> m.get(f) != null));
  }

  /** Store files should no longer be fully cached after the region closes when eviction is on. */
  @Test
  public void testEvictOnClose() throws Exception {
    doTestEvictOnClose("testEvictOnClose", true,
      (f, m) -> Waiter.waitFor(UTIL.getConfiguration(), 1000, () -> m.get(f) != null),
      (f, m) -> Waiter.waitFor(UTIL.getConfiguration(), 1000, () -> m.get(f) == null));
  }

  /** Store files should remain fully cached after the region closes when eviction is off. */
  @Test
  public void testDoesntEvictOnClose() throws Exception {
    doTestEvictOnClose("testDoesntEvictOnClose", false,
      (f, m) -> Waiter.waitFor(UTIL.getConfiguration(), 1000, () -> m.get(f) != null),
      (f, m) -> Waiter.waitFor(UTIL.getConfiguration(), 1000, () -> m.get(f) != null));
  }

  private void doTestEvictOnSplit(String table, boolean evictOnSplit,
    BiConsumer<String, Map<String, Pair<String, Long>>> predicateBeforeSplit,
    BiConsumer<String, Map<String, Pair<String, Long>>> predicateAfterSplit) throws Exception {
    UTIL.startMiniCluster(1);
    try {
      TableName tableName = TableName.valueOf(table);
      createTable(tableName, true);
      Collection<HStoreFile> files =
        UTIL.getMiniHBaseCluster().getRegions(tableName).get(0).getStores().get(0).getStorefiles();
      checkCacheForBlocks(tableName, files, predicateBeforeSplit);
      UTIL.getMiniHBaseCluster().getRegionServer(0).getConfiguration()
        .setBoolean(EVICT_BLOCKS_ON_SPLIT_KEY, evictOnSplit);
      UTIL.getAdmin().split(tableName, Bytes.toBytes("row-500"));
      Waiter.waitFor(UTIL.getConfiguration(), 30000,
        () -> UTIL.getMiniHBaseCluster().getRegions(tableName).size() == 2);
      UTIL.waitUntilNoRegionsInTransition();
      checkCacheForBlocks(tableName, files, predicateAfterSplit);
    } finally {
      UTIL.shutdownMiniCluster();
    }
  }

  private void doTestEvictOnClose(String table, boolean evictOnClose,
    BiConsumer<String, Map<String, Pair<String, Long>>> predicateBeforeClose,
    BiConsumer<String, Map<String, Pair<String, Long>>> predicateAfterClose) throws Exception {
    UTIL.startMiniCluster(1);
    try {
      TableName tableName = TableName.valueOf(table);
      createTable(tableName, true);
      Collection<HStoreFile> files =
        UTIL.getMiniHBaseCluster().getRegions(tableName).get(0).getStores().get(0).getStorefiles();
      checkCacheForBlocks(tableName, files, predicateBeforeClose);
      UTIL.getMiniHBaseCluster().getRegionServer(0).getConfiguration()
        .setBoolean(EVICT_BLOCKS_ON_CLOSE_KEY, evictOnClose);
      UTIL.getAdmin().disableTable(tableName);
      UTIL.waitUntilNoRegionsInTransition();
      checkCacheForBlocks(tableName, files, predicateAfterClose);
    } finally {
      UTIL.shutdownMiniCluster();
    }
  }

  /**
   * Creates a single-family table, loads 1000 rows and, if requested, flushes so that a store
   * file is written (and cached on write).
   */
  private void createTable(TableName tableName, boolean shouldFlushTable)
    throws IOException, InterruptedException {
    byte[] family = Bytes.toBytes("CF");
    TableDescriptor td = TableDescriptorBuilder.newBuilder(tableName)
      .setColumnFamily(ColumnFamilyDescriptorBuilder.of(family)).build();
    UTIL.getAdmin().createTable(td);
    UTIL.waitTableAvailable(tableName);
    Table tbl = UTIL.getConnection().getTable(tableName);
    List<Put> puts = new ArrayList<>();
    for (int i = 0; i < 1000; i++) {
      Put p = new Put(Bytes.toBytes("row-" + i));
      p.addColumn(family, Bytes.toBytes(1), Bytes.toBytes("val-" + i));
      puts.add(p);
    }
    tbl.put(puts);
    if (shouldFlushTable) {
      UTIL.getAdmin().flush(tableName);
      Thread.sleep(5000);
    }
  }

  /**
   * Runs the given checker against the region server's map of fully cached files for every store
   * file passed in.
   */
  private void checkCacheForBlocks(TableName tableName, Collection<HStoreFile> files,
    BiConsumer<String, Map<String, Pair<String, Long>>> checker) {
    files.forEach(f -> {
      UTIL.getMiniHBaseCluster().getRegionServer(0).getBlockCache().ifPresent(cache -> {
        cache.getFullyCachedFiles().ifPresent(m -> {
          checker.accept(f.getPath().getName(), m);
        });
        assertTrue(cache.getFullyCachedFiles().isPresent());
      });
    });
  }

  /** Without a flush there are no store files, so the region should report nothing cached. */
  @Test
  public void testNoCacheWithoutFlush() throws Exception {
    UTIL.startMiniCluster(1);
    try {
      TableName tableName = TableName.valueOf("tableNoCache");
      createTable(tableName, false);
      checkRegionCached(tableName, false);
    } finally {
      UTIL.shutdownMiniCluster();
    }
  }

  /** After a flush the cached-on-write store file should give the region a cached ratio above zero. */
  @Test
  public void testCacheWithFlush() throws Exception {
    UTIL.startMiniCluster(1);
    try {
      TableName tableName = TableName.valueOf("tableWithFlush");
      createTable(tableName, true);
      checkRegionCached(tableName, true);
    } finally {
      UTIL.shutdownMiniCluster();
    }
  }

  /** Asserts via region metrics whether each region of the table reports any cached data. */
  private void checkRegionCached(TableName tableName, boolean isCached) throws IOException {
    UTIL.getMiniHBaseCluster().getRegions(tableName).forEach(r -> {
      try {
        UTIL.getMiniHBaseCluster().getClusterMetrics().getLiveServerMetrics().forEach((sn, sm) -> {
          for (Map.Entry<byte[], RegionMetrics> rm : sm.getRegionMetrics().entrySet()) {
            if (rm.getValue().getNameAsString().equals(r.getRegionInfo().getRegionNameAsString())) {
              assertTrue(isCached == (rm.getValue().getCurrentRegionCachedRatio() > 0.0f));
            }
          }
        });
      } catch (IOException e) {
        throw new RuntimeException(e);
      }
    });
  }
}