/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.regionserver.compactions;

import static org.junit.Assert.assertEquals;

import java.io.IOException;
import java.util.List;
import java.util.concurrent.ThreadLocalRandom;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.DoNotRetryIOException;
import org.apache.hadoop.hbase.HBaseClassTestRule;
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.MiniHBaseCluster;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.Waiter;
import org.apache.hadoop.hbase.Waiter.ExplainingPredicate;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.regionserver.DefaultStoreEngine;
import org.apache.hadoop.hbase.regionserver.DisabledRegionSplitPolicy;
import org.apache.hadoop.hbase.regionserver.HRegion;
import org.apache.hadoop.hbase.regionserver.HRegionServer;
import org.apache.hadoop.hbase.regionserver.HStore;
import org.apache.hadoop.hbase.regionserver.HStoreFile;
import org.apache.hadoop.hbase.testclassification.MediumTests;
import org.apache.hadoop.hbase.testclassification.RegionServerTests;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.EnvironmentEdge;
import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
import org.apache.hadoop.hbase.util.JVMClusterUtil;
import org.apache.hadoop.hbase.util.TimeOffsetEnvironmentEdge;
import org.junit.AfterClass;
import org.junit.Assert;
import org.junit.BeforeClass;
import org.junit.ClassRule;
import org.junit.Rule;
import org.junit.Test;
import org.junit.experimental.categories.Category;
import org.junit.rules.ExpectedException;

import org.apache.hbase.thirdparty.com.google.common.base.Preconditions;

@Category({ RegionServerTests.class, MediumTests.class })
public class TestFIFOCompactionPolicy {

  @ClassRule
  public static final HBaseClassTestRule CLASS_RULE =
      HBaseClassTestRule.forClass(TestFIFOCompactionPolicy.class);

  private static final HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();

  private final TableName tableName = TableName.valueOf(getClass().getSimpleName());

  private final byte[] family = Bytes.toBytes("f");

  private final byte[] qualifier = Bytes.toBytes("q");

  @Rule
  public ExpectedException error = ExpectedException.none();

  private HStore getStoreWithName(TableName tableName) {
    MiniHBaseCluster cluster = TEST_UTIL.getMiniHBaseCluster();
    List<JVMClusterUtil.RegionServerThread> rsts = cluster.getRegionServerThreads();
    for (int i = 0; i < cluster.getRegionServerThreads().size(); i++) {
      HRegionServer hrs = rsts.get(i).getRegionServer();
      for (HRegion region : hrs.getRegions(tableName)) {
        return region.getStores().iterator().next();
      }
    }
    return null;
  }

  private HStore prepareData() throws IOException {
    Admin admin = TEST_UTIL.getAdmin();
    TableDescriptor desc = TableDescriptorBuilder.newBuilder(tableName)
        .setValue(DefaultStoreEngine.DEFAULT_COMPACTION_POLICY_CLASS_KEY,
          FIFOCompactionPolicy.class.getName())
        .setValue(HConstants.HBASE_REGION_SPLIT_POLICY_KEY,
          DisabledRegionSplitPolicy.class.getName())
        .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(family).setTimeToLive(1).build())
        .build();
    admin.createTable(desc);
    Table table = TEST_UTIL.getConnection().getTable(tableName);
    TimeOffsetEnvironmentEdge edge =
        (TimeOffsetEnvironmentEdge) EnvironmentEdgeManager.getDelegate();
    for (int i = 0; i < 10; i++) {
      for (int j = 0; j < 10; j++) {
        byte[] value = new byte[128 * 1024];
        ThreadLocalRandom.current().nextBytes(value);
        table.put(new Put(Bytes.toBytes(i * 10 + j)).addColumn(family, qualifier, value));
      }
      admin.flush(tableName);
      edge.increment(1001);
    }
    return getStoreWithName(tableName);
  }

  @BeforeClass
  public static void setEnvironmentEdge() throws Exception {
    EnvironmentEdge ee = new TimeOffsetEnvironmentEdge();
    EnvironmentEdgeManager.injectEdge(ee);
    Configuration conf = TEST_UTIL.getConfiguration();
    conf.setInt(HStore.BLOCKING_STOREFILES_KEY, 10000);
    TEST_UTIL.startMiniCluster(1);
  }

  @AfterClass
  public static void resetEnvironmentEdge() throws Exception {
    TEST_UTIL.shutdownMiniCluster();
    EnvironmentEdgeManager.reset();
  }

  @Test
  public void testPurgeExpiredFiles() throws Exception {
    HStore store = prepareData();
    assertEquals(10, store.getStorefilesCount());
    TEST_UTIL.getAdmin().majorCompact(tableName);
    TEST_UTIL.waitFor(30000, new ExplainingPredicate<Exception>() {

      @Override
      public boolean evaluate() throws Exception {
        return store.getStorefilesCount() == 1;
      }

      @Override
      public String explainFailure() throws Exception {
        return "The store file count " + store.getStorefilesCount() + " is still greater than 1";
      }
    });
  }

  @Test
  public void testSanityCheckTTL() throws IOException {
    error.expect(DoNotRetryIOException.class);
    error.expectMessage("Default TTL is not supported");
    TableName tableName = TableName.valueOf(getClass().getSimpleName() + "-TTL");
    TableDescriptor desc = TableDescriptorBuilder.newBuilder(tableName)
        .setValue(DefaultStoreEngine.DEFAULT_COMPACTION_POLICY_CLASS_KEY,
          FIFOCompactionPolicy.class.getName())
        .setValue(HConstants.HBASE_REGION_SPLIT_POLICY_KEY,
          DisabledRegionSplitPolicy.class.getName())
        .setColumnFamily(ColumnFamilyDescriptorBuilder.of(family)).build();
    TEST_UTIL.getAdmin().createTable(desc);
  }

  @Test
  public void testSanityCheckMinVersion() throws IOException {
    error.expect(DoNotRetryIOException.class);
    error.expectMessage("MIN_VERSION > 0 is not supported for FIFO compaction");
    TableName tableName = TableName.valueOf(getClass().getSimpleName() + "-MinVersion");
    TableDescriptor desc = TableDescriptorBuilder.newBuilder(tableName)
        .setValue(DefaultStoreEngine.DEFAULT_COMPACTION_POLICY_CLASS_KEY,
          FIFOCompactionPolicy.class.getName())
        .setValue(HConstants.HBASE_REGION_SPLIT_POLICY_KEY,
          DisabledRegionSplitPolicy.class.getName())
        .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(family).setTimeToLive(1)
          .setMinVersions(1).build())
        .build();
    TEST_UTIL.getAdmin().createTable(desc);
  }

  @Test
  public void testSanityCheckBlockingStoreFiles() throws IOException {
    error.expect(DoNotRetryIOException.class);
    error.expectMessage("Blocking file count 'hbase.hstore.blockingStoreFiles'");
    error.expectMessage("is below recommended minimum of 1000 for column family");
    TableName tableName = TableName.valueOf(getClass().getSimpleName() + "-BlockingStoreFiles");
    TableDescriptor desc = TableDescriptorBuilder.newBuilder(tableName)
        .setValue(DefaultStoreEngine.DEFAULT_COMPACTION_POLICY_CLASS_KEY,
          FIFOCompactionPolicy.class.getName())
        .setValue(HConstants.HBASE_REGION_SPLIT_POLICY_KEY,
          DisabledRegionSplitPolicy.class.getName())
        .setValue(HStore.BLOCKING_STOREFILES_KEY, "10")
        .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(family).setTimeToLive(1).build())
        .build();
    TEST_UTIL.getAdmin().createTable(desc);
  }

  /**
   * Unit test for HBASE-21504
   */
  @Test
  public void testFIFOCompactionPolicyExpiredEmptyHFiles() throws Exception {
    TableName tableName = TableName.valueOf("testFIFOCompactionPolicyExpiredEmptyHFiles");
    TableDescriptor desc = TableDescriptorBuilder.newBuilder(tableName)
        .setValue(DefaultStoreEngine.DEFAULT_COMPACTION_POLICY_CLASS_KEY,
          FIFOCompactionPolicy.class.getName())
        .setValue(HConstants.HBASE_REGION_SPLIT_POLICY_KEY,
          DisabledRegionSplitPolicy.class.getName())
        .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(family).setTimeToLive(1).build())
        .build();
    Table table = TEST_UTIL.createTable(desc, null);
    long ts = System.currentTimeMillis() - 10 * 1000;
    Put put =
        new Put(Bytes.toBytes("row1")).addColumn(family, qualifier, ts, Bytes.toBytes("value0"));
    table.put(put);
    TEST_UTIL.getAdmin().flush(tableName); // HFile-0
    put = new Put(Bytes.toBytes("row2")).addColumn(family, qualifier, ts, Bytes.toBytes("value1"));
    table.put(put);
    final int testWaitTimeoutMs = 20000;
    TEST_UTIL.getAdmin().flush(tableName); // HFile-1

    HStore store = Preconditions.checkNotNull(getStoreWithName(tableName));
    Assert.assertEquals(2, store.getStorefilesCount());

    TEST_UTIL.getAdmin().majorCompact(tableName);
    TEST_UTIL.waitFor(testWaitTimeoutMs,
      (Waiter.Predicate<Exception>) () -> store.getStorefilesCount() == 1);

    Assert.assertEquals(1, store.getStorefilesCount());
    HStoreFile sf = Preconditions.checkNotNull(store.getStorefiles().iterator().next());
    Assert.assertEquals(0, sf.getReader().getEntries());

    put = new Put(Bytes.toBytes("row3")).addColumn(family, qualifier, ts, Bytes.toBytes("value1"));
    table.put(put);
    TEST_UTIL.getAdmin().flush(tableName); // HFile-2
    Assert.assertEquals(2, store.getStorefilesCount());

    TEST_UTIL.getAdmin().majorCompact(tableName);
    TEST_UTIL.waitFor(testWaitTimeoutMs,
      (Waiter.Predicate<Exception>) () -> store.getStorefilesCount() == 1);

    Assert.assertEquals(1, store.getStorefilesCount());
    sf = Preconditions.checkNotNull(store.getStorefiles().iterator().next());
    Assert.assertEquals(0, sf.getReader().getEntries());
  }
}