/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.regionserver.compactions;

import static org.junit.Assert.assertEquals;

import java.io.IOException;
import java.util.List;
import java.util.concurrent.ThreadLocalRandom;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.DoNotRetryIOException;
import org.apache.hadoop.hbase.HBaseClassTestRule;
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.MiniHBaseCluster;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.Waiter.ExplainingPredicate;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.regionserver.DefaultStoreEngine;
import org.apache.hadoop.hbase.regionserver.DisabledRegionSplitPolicy;
import org.apache.hadoop.hbase.regionserver.HRegion;
import org.apache.hadoop.hbase.regionserver.HRegionServer;
import org.apache.hadoop.hbase.regionserver.HStore;
import org.apache.hadoop.hbase.regionserver.HStoreFile;
import org.apache.hadoop.hbase.testclassification.MediumTests;
import org.apache.hadoop.hbase.testclassification.RegionServerTests;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.EnvironmentEdge;
import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
import org.apache.hadoop.hbase.util.JVMClusterUtil;
import org.apache.hadoop.hbase.util.TimeOffsetEnvironmentEdge;
import org.junit.AfterClass;
import org.junit.Assert;
import org.junit.BeforeClass;
import org.junit.ClassRule;
import org.junit.Rule;
import org.junit.Test;
import org.junit.experimental.categories.Category;
import org.junit.rules.ExpectedException;

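/**
 * Tests for {@link FIFOCompactionPolicy}: major compaction purges expired store files, and table
 * creation fails fast when the column family or cluster configuration does not satisfy the
 * policy's sanity checks (a finite TTL, MIN_VERSIONS of 0, and a high enough blocking store file
 * count).
 */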
@Category({ RegionServerTests.class, MediumTests.class })
public class TestFIFOCompactionPolicy {

  @ClassRule
  public static final HBaseClassTestRule CLASS_RULE =
      HBaseClassTestRule.forClass(TestFIFOCompactionPolicy.class);

  private static final HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();

  private final TableName tableName = TableName.valueOf(getClass().getSimpleName());

  private final byte[] family = Bytes.toBytes("f");

  private final byte[] qualifier = Bytes.toBytes("q");

  @Rule
  public ExpectedException error = ExpectedException.none();

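  /**
   * Returns the first store of the first region of the given table found on any region server of
   * the mini cluster, or {@code null} if the table has no open regions.
   */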
  private HStore getStoreWithName(TableName tableName) {
    MiniHBaseCluster cluster = TEST_UTIL.getMiniHBaseCluster();
    List<JVMClusterUtil.RegionServerThread> rsts = cluster.getRegionServerThreads();
    for (JVMClusterUtil.RegionServerThread rst : rsts) {
      HRegionServer hrs = rst.getRegionServer();
      for (HRegion region : hrs.getRegions(tableName)) {
        return region.getStores().iterator().next();
      }
    }
    return null;
  }

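  /**
   * Creates a FIFO-compacted table with a 1 second TTL and loads it with ten flushes of ten rows
   * each, advancing the injected clock past the TTL after every flush so the resulting store
   * files expire. Returns the table's store, which holds ten store files afterwards.
   */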
  private HStore prepareData() throws IOException {
    Admin admin = TEST_UTIL.getAdmin();
    TableDescriptor desc = TableDescriptorBuilder.newBuilder(tableName)
        .setValue(DefaultStoreEngine.DEFAULT_COMPACTION_POLICY_CLASS_KEY,
          FIFOCompactionPolicy.class.getName())
        .setValue(HConstants.HBASE_REGION_SPLIT_POLICY_KEY,
          DisabledRegionSplitPolicy.class.getName())
        .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(family).setTimeToLive(1).build())
        .build();
    admin.createTable(desc);
    Table table = TEST_UTIL.getConnection().getTable(tableName);
    TimeOffsetEnvironmentEdge edge =
        (TimeOffsetEnvironmentEdge) EnvironmentEdgeManager.getDelegate();
    for (int i = 0; i < 10; i++) {
      for (int j = 0; j < 10; j++) {
        byte[] value = new byte[128 * 1024];
        ThreadLocalRandom.current().nextBytes(value);
        table.put(new Put(Bytes.toBytes(i * 10 + j)).addColumn(family, qualifier, value));
      }
      admin.flush(tableName);
      // Advance the injected clock just past the 1 second TTL so the store file flushed above
      // is already expired by the time the next one is written.
      edge.increment(1001);
    }
    return getStoreWithName(tableName);
  }

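  /**
   * Injects a {@link TimeOffsetEnvironmentEdge} so tests can advance time deterministically, and
   * raises the blocking store file limit well above the recommended minimum of 1000 that the FIFO
   * sanity check expects before starting the single-node mini cluster.
   */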
  @BeforeClass
  public static void setEnvironmentEdge() throws Exception {
    EnvironmentEdge ee = new TimeOffsetEnvironmentEdge();
    EnvironmentEdgeManager.injectEdge(ee);
    Configuration conf = TEST_UTIL.getConfiguration();
    conf.setInt(HStore.BLOCKING_STOREFILES_KEY, 10000);
    TEST_UTIL.startMiniCluster(1);
  }

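  /**
   * Shuts down the mini cluster and restores the default environment edge.
   */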
  @AfterClass
  public static void resetEnvironmentEdge() throws Exception {
    TEST_UTIL.shutdownMiniCluster();
    EnvironmentEdgeManager.reset();
  }

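  /**
   * Major compaction under the FIFO policy should purge the expired store files created by
   * {@link #prepareData()}, leaving a single store file behind.
   */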
  @Test
  public void testPurgeExpiredFiles() throws Exception {
    HStore store = prepareData();
    assertEquals(10, store.getStorefilesCount());
    TEST_UTIL.getAdmin().majorCompact(tableName);
    TEST_UTIL.waitFor(30000, new ExplainingPredicate<Exception>() {

      @Override
      public boolean evaluate() throws Exception {
        return store.getStorefilesCount() == 1;
      }

      @Override
      public String explainFailure() throws Exception {
        return "The store file count " + store.getStorefilesCount() + " is still greater than 1";
      }
    });
  }

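  /**
   * Table creation must be rejected when FIFO compaction is requested for a column family that
   * still uses the default TTL.
   */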
  @Test
  public void testSanityCheckTTL() throws IOException {
    error.expect(DoNotRetryIOException.class);
    error.expectMessage("Default TTL is not supported");
    TableName tableName = TableName.valueOf(getClass().getSimpleName() + "-TTL");
    TableDescriptor desc = TableDescriptorBuilder.newBuilder(tableName)
        .setValue(DefaultStoreEngine.DEFAULT_COMPACTION_POLICY_CLASS_KEY,
          FIFOCompactionPolicy.class.getName())
        .setValue(HConstants.HBASE_REGION_SPLIT_POLICY_KEY,
          DisabledRegionSplitPolicy.class.getName())
        .setColumnFamily(ColumnFamilyDescriptorBuilder.of(family)).build();
    TEST_UTIL.getAdmin().createTable(desc);
  }

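  /**
   * Table creation must be rejected when FIFO compaction is requested for a column family whose
   * MIN_VERSIONS is greater than 0.
   */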
  @Test
  public void testSanityCheckMinVersion() throws IOException {
    error.expect(DoNotRetryIOException.class);
    error.expectMessage("MIN_VERSION > 0 is not supported for FIFO compaction");
    TableName tableName = TableName.valueOf(getClass().getSimpleName() + "-MinVersion");
    TableDescriptor desc = TableDescriptorBuilder.newBuilder(tableName)
        .setValue(DefaultStoreEngine.DEFAULT_COMPACTION_POLICY_CLASS_KEY,
          FIFOCompactionPolicy.class.getName())
        .setValue(HConstants.HBASE_REGION_SPLIT_POLICY_KEY,
          DisabledRegionSplitPolicy.class.getName())
        .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(family).setTimeToLive(1)
            .setMinVersions(1).build())
        .build();
    TEST_UTIL.getAdmin().createTable(desc);
  }

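  /**
   * Table creation must be rejected when the table overrides hbase.hstore.blockingStoreFiles with
   * a value below the recommended minimum of 1000 for FIFO compaction.
   */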
  @Test
  public void testSanityCheckBlockingStoreFiles() throws IOException {
    error.expect(DoNotRetryIOException.class);
    error.expectMessage("Blocking file count 'hbase.hstore.blockingStoreFiles'");
    error.expectMessage("is below recommended minimum of 1000 for column family");
    TableName tableName = TableName.valueOf(getClass().getSimpleName() + "-BlockingStoreFiles");
    TableDescriptor desc = TableDescriptorBuilder.newBuilder(tableName)
        .setValue(DefaultStoreEngine.DEFAULT_COMPACTION_POLICY_CLASS_KEY,
          FIFOCompactionPolicy.class.getName())
        .setValue(HConstants.HBASE_REGION_SPLIT_POLICY_KEY,
          DisabledRegionSplitPolicy.class.getName())
        .setValue(HStore.BLOCKING_STOREFILES_KEY, "10")
        .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(family).setTimeToLive(1).build())
        .build();
    TEST_UTIL.getAdmin().createTable(desc);
  }

  /**
   * Unit test for HBASE-21504: when every cell in a store has expired, a FIFO major compaction
   * should leave a single empty store file behind, and later flushes and compactions of the same
   * store should keep collapsing back to a single empty file.
   */
  @Test
  public void testFIFOCompactionPolicyExpiredEmptyHFiles() throws Exception {
    TableName tableName = TableName.valueOf("testFIFOCompactionPolicyExpiredEmptyHFiles");
    TableDescriptor desc = TableDescriptorBuilder.newBuilder(tableName)
        .setValue(DefaultStoreEngine.DEFAULT_COMPACTION_POLICY_CLASS_KEY,
          FIFOCompactionPolicy.class.getName())
        .setValue(HConstants.HBASE_REGION_SPLIT_POLICY_KEY,
          DisabledRegionSplitPolicy.class.getName())
        .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(family).setTimeToLive(1).build())
        .build();
    Table table = TEST_UTIL.createTable(desc, null);
    // Write cells whose timestamp is already well past the 1 second TTL.
    long ts = System.currentTimeMillis() - 10 * 1000;
    Put put =
        new Put(Bytes.toBytes("row1")).addColumn(family, qualifier, ts, Bytes.toBytes("value0"));
    table.put(put);
    TEST_UTIL.getAdmin().flush(tableName); // HFile-0
    put = new Put(Bytes.toBytes("row2")).addColumn(family, qualifier, ts, Bytes.toBytes("value1"));
    table.put(put);
    TEST_UTIL.getAdmin().flush(tableName); // HFile-1

    HStore store = getStoreWithName(tableName);
    Assert.assertNotNull(store);
    Assert.assertEquals(2, store.getStorefilesCount());

    TEST_UTIL.getAdmin().majorCompact(tableName);
    // Wait up to 10 seconds for the major compaction to finish.
    for (int i = 0; i < 100; i++) {
      if (store.getStorefilesCount() > 1) {
        Thread.sleep(100);
      } else {
        break;
      }
    }
    // Every cell was expired, so the compaction output is a single empty store file.
    Assert.assertEquals(1, store.getStorefilesCount());
    HStoreFile sf = store.getStorefiles().iterator().next();
    Assert.assertNotNull(sf);
    Assert.assertEquals(0, sf.getReader().getEntries());

    put = new Put(Bytes.toBytes("row3")).addColumn(family, qualifier, ts, Bytes.toBytes("value1"));
    table.put(put);
    TEST_UTIL.getAdmin().flush(tableName); // HFile-2
    Assert.assertEquals(2, store.getStorefilesCount());

    TEST_UTIL.getAdmin().majorCompact(tableName);
    // Wait up to 10 seconds for the major compaction to finish.
    for (int i = 0; i < 100; i++) {
      if (store.getStorefilesCount() > 1) {
        Thread.sleep(100);
      } else {
        break;
      }
    }

    // Again, only a single empty store file should remain.
    Assert.assertEquals(1, store.getStorefilesCount());
    sf = store.getStorefiles().iterator().next();
    Assert.assertNotNull(sf);
    Assert.assertEquals(0, sf.getReader().getEntries());
  }
}