/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.regionserver.compactions;

import static org.junit.Assert.assertEquals;

import java.io.IOException;
import java.util.List;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.DoNotRetryIOException;
import org.apache.hadoop.hbase.HBaseClassTestRule;
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.MiniHBaseCluster;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.Waiter;
import org.apache.hadoop.hbase.Waiter.ExplainingPredicate;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.regionserver.DefaultStoreEngine;
import org.apache.hadoop.hbase.regionserver.DisabledRegionSplitPolicy;
import org.apache.hadoop.hbase.regionserver.HRegion;
import org.apache.hadoop.hbase.regionserver.HRegionServer;
import org.apache.hadoop.hbase.regionserver.HStore;
import org.apache.hadoop.hbase.regionserver.HStoreFile;
import org.apache.hadoop.hbase.testclassification.MediumTests;
import org.apache.hadoop.hbase.testclassification.RegionServerTests;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.EnvironmentEdge;
import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
import org.apache.hadoop.hbase.util.JVMClusterUtil;
import org.apache.hadoop.hbase.util.TimeOffsetEnvironmentEdge;
import org.junit.AfterClass;
import org.junit.BeforeClass;
import org.junit.ClassRule;
import org.junit.Rule;
import org.junit.Test;
import org.junit.experimental.categories.Category;
import org.junit.rules.ExpectedException;

import org.apache.hbase.thirdparty.com.google.common.base.Preconditions;

@Category({ RegionServerTests.class, MediumTests.class })
public class TestFIFOCompactionPolicy {

  @ClassRule
  public static final HBaseClassTestRule CLASS_RULE =
    HBaseClassTestRule.forClass(TestFIFOCompactionPolicy.class);

  private static final HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();

  private final TableName tableName = TableName.valueOf(getClass().getSimpleName());

  private final byte[] family = Bytes.toBytes("f");

  private final byte[] qualifier = Bytes.toBytes("q");

  @Rule
  public ExpectedException error = ExpectedException.none();

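  /**
   * Returns the first store of the first region of {@code tableName} found on any region server
   * in the mini cluster, or {@code null} if the table has no online regions.
   */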
  private HStore getStoreWithName(TableName tableName) {
    MiniHBaseCluster cluster = TEST_UTIL.getMiniHBaseCluster();
    List<JVMClusterUtil.RegionServerThread> rsts = cluster.getRegionServerThreads();
    for (JVMClusterUtil.RegionServerThread rst : rsts) {
      HRegionServer hrs = rst.getRegionServer();
      for (HRegion region : hrs.getRegions(tableName)) {
        return region.getStores().iterator().next();
      }
    }
    return null;
  }

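  /**
   * Creates a table configured for FIFO compaction with a 1-second TTL and loads it with ten
   * store files of ten rows each, advancing the injected clock by just over one second after
   * every flush so that each file is already expired by the time compaction runs.
   */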
  private HStore prepareData() throws IOException {
    Admin admin = TEST_UTIL.getAdmin();
    TableDescriptor desc = TableDescriptorBuilder.newBuilder(tableName)
      .setValue(DefaultStoreEngine.DEFAULT_COMPACTION_POLICY_CLASS_KEY,
        FIFOCompactionPolicy.class.getName())
      .setValue(HConstants.HBASE_REGION_SPLIT_POLICY_KEY, DisabledRegionSplitPolicy.class.getName())
      .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(family).setTimeToLive(1).build())
      .build();
    admin.createTable(desc);
    Table table = TEST_UTIL.getConnection().getTable(tableName);
    TimeOffsetEnvironmentEdge edge =
      (TimeOffsetEnvironmentEdge) EnvironmentEdgeManager.getDelegate();
    for (int i = 0; i < 10; i++) {
      for (int j = 0; j < 10; j++) {
        byte[] value = new byte[128 * 1024];
        Bytes.random(value);
        table.put(new Put(Bytes.toBytes(i * 10 + j)).addColumn(family, qualifier, value));
      }
      admin.flush(tableName);
      edge.increment(1001);
    }
    return getStoreWithName(tableName);
  }

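  /**
   * Injects a {@link TimeOffsetEnvironmentEdge} so the tests can advance the clock
   * deterministically, and raises the blocking store file limit so the many small flushes used
   * by these tests do not throttle writes.
   */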
  @BeforeClass
  public static void setEnvironmentEdge() throws Exception {
    EnvironmentEdge ee = new TimeOffsetEnvironmentEdge();
    EnvironmentEdgeManager.injectEdge(ee);
    Configuration conf = TEST_UTIL.getConfiguration();
    conf.setInt(HStore.BLOCKING_STOREFILES_KEY, 10000);
    // The expired-store-file deletion optimization during compaction interferes with the FIFO
    // compaction policy: the race changes the set of in-flight compaction files, so the policy
    // selects a non-deterministic number of files. Disable that optimization for this test run.
    conf.setBoolean("hbase.store.delete.expired.storefile", false);
    TEST_UTIL.startMiniCluster(1);
  }

  @AfterClass
  public static void resetEnvironmentEdge() throws Exception {
    TEST_UTIL.shutdownMiniCluster();
    EnvironmentEdgeManager.reset();
  }

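  /**
   * Verifies that a major compaction under the FIFO policy collapses the ten expired store files
   * into a single file.
   */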
  @Test
  public void testPurgeExpiredFiles() throws Exception {
    HStore store = prepareData();
    assertEquals(10, store.getStorefilesCount());
    TEST_UTIL.getAdmin().majorCompact(tableName);
    TEST_UTIL.waitFor(30000, new ExplainingPredicate<Exception>() {

      @Override
      public boolean evaluate() throws Exception {
        return store.getStorefilesCount() == 1;
      }

      @Override
      public String explainFailure() throws Exception {
        return "The store file count " + store.getStorefilesCount() + " is still greater than 1";
      }
    });
  }

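  /**
   * FIFO compaction requires an explicit TTL on the column family; creating a table that keeps
   * the default (unlimited) TTL must fail the sanity check.
   */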
  @Test
  public void testSanityCheckTTL() throws IOException {
    error.expect(DoNotRetryIOException.class);
    error.expectMessage("Default TTL is not supported");
    TableName tableName = TableName.valueOf(getClass().getSimpleName() + "-TTL");
    TableDescriptor desc = TableDescriptorBuilder.newBuilder(tableName)
      .setValue(DefaultStoreEngine.DEFAULT_COMPACTION_POLICY_CLASS_KEY,
        FIFOCompactionPolicy.class.getName())
      .setValue(HConstants.HBASE_REGION_SPLIT_POLICY_KEY, DisabledRegionSplitPolicy.class.getName())
      .setColumnFamily(ColumnFamilyDescriptorBuilder.of(family)).build();
    TEST_UTIL.getAdmin().createTable(desc);
  }

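  /**
   * FIFO compaction does not support MIN_VERSIONS > 0; creating a table with
   * {@code setMinVersions(1)} must fail the sanity check.
   */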
  @Test
  public void testSanityCheckMinVersion() throws IOException {
    error.expect(DoNotRetryIOException.class);
    error.expectMessage("MIN_VERSION > 0 is not supported for FIFO compaction");
    TableName tableName = TableName.valueOf(getClass().getSimpleName() + "-MinVersion");
    TableDescriptor desc = TableDescriptorBuilder.newBuilder(tableName)
      .setValue(DefaultStoreEngine.DEFAULT_COMPACTION_POLICY_CLASS_KEY,
        FIFOCompactionPolicy.class.getName())
      .setValue(HConstants.HBASE_REGION_SPLIT_POLICY_KEY, DisabledRegionSplitPolicy.class.getName())
      .setColumnFamily(
        ColumnFamilyDescriptorBuilder.newBuilder(family).setTimeToLive(1).setMinVersions(1).build())
      .build();
    TEST_UTIL.getAdmin().createTable(desc);
  }

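  /**
   * FIFO compaction requires a blocking store file count of at least 1000; creating a table that
   * sets {@code hbase.hstore.blockingStoreFiles} to 10 must fail the sanity check.
   */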
  @Test
  public void testSanityCheckBlockingStoreFiles() throws IOException {
    error.expect(DoNotRetryIOException.class);
    error.expectMessage("Blocking file count 'hbase.hstore.blockingStoreFiles'");
    error.expectMessage("is below recommended minimum of 1000 for column family");
    TableName tableName = TableName.valueOf(getClass().getSimpleName() + "-BlockingStoreFiles");
    TableDescriptor desc = TableDescriptorBuilder.newBuilder(tableName)
      .setValue(DefaultStoreEngine.DEFAULT_COMPACTION_POLICY_CLASS_KEY,
        FIFOCompactionPolicy.class.getName())
      .setValue(HConstants.HBASE_REGION_SPLIT_POLICY_KEY, DisabledRegionSplitPolicy.class.getName())
      .setValue(HStore.BLOCKING_STOREFILES_KEY, "10")
      .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(family).setTimeToLive(1).build())
      .build();
    TEST_UTIL.getAdmin().createTable(desc);
  }

  /**
   * Unit test for HBASE-21504: compacting fully expired store files under the FIFO policy leaves
   * a single empty HFile behind, and a subsequent compaction must still be able to collapse that
   * empty file together with newly flushed expired files.
   */
  @Test
  public void testFIFOCompactionPolicyExpiredEmptyHFiles() throws Exception {
    TableName tableName = TableName.valueOf("testFIFOCompactionPolicyExpiredEmptyHFiles");
    TableDescriptor desc = TableDescriptorBuilder.newBuilder(tableName)
      .setValue(DefaultStoreEngine.DEFAULT_COMPACTION_POLICY_CLASS_KEY,
        FIFOCompactionPolicy.class.getName())
      .setValue(HConstants.HBASE_REGION_SPLIT_POLICY_KEY, DisabledRegionSplitPolicy.class.getName())
      .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(family).setTimeToLive(1).build())
      .build();
    Table table = TEST_UTIL.createTable(desc, null);
    final int testWaitTimeoutMs = 20000;
    long ts = EnvironmentEdgeManager.currentTime() - 10 * 1000;
    Put put =
      new Put(Bytes.toBytes("row1")).addColumn(family, qualifier, ts, Bytes.toBytes("value0"));
    table.put(put);
    TEST_UTIL.getAdmin().flush(tableName); // HFile-0
    put = new Put(Bytes.toBytes("row2")).addColumn(family, qualifier, ts, Bytes.toBytes("value1"));
    table.put(put);
    TEST_UTIL.getAdmin().flush(tableName); // HFile-1

    HStore store = Preconditions.checkNotNull(getStoreWithName(tableName));
    assertEquals(2, store.getStorefilesCount());

    TEST_UTIL.getAdmin().majorCompact(tableName);
    TEST_UTIL.waitFor(testWaitTimeoutMs,
      (Waiter.Predicate<Exception>) () -> store.getStorefilesCount() == 1);

    assertEquals(1, store.getStorefilesCount());
    HStoreFile sf = Preconditions.checkNotNull(store.getStorefiles().iterator().next());
    assertEquals(0, sf.getReader().getEntries());

    put = new Put(Bytes.toBytes("row3")).addColumn(family, qualifier, ts, Bytes.toBytes("value1"));
    table.put(put);
    TEST_UTIL.getAdmin().flush(tableName); // HFile-2
    assertEquals(2, store.getStorefilesCount());

    TEST_UTIL.getAdmin().majorCompact(tableName);
    TEST_UTIL.waitFor(testWaitTimeoutMs,
      (Waiter.Predicate<Exception>) () -> store.getStorefilesCount() == 1);

    assertEquals(1, store.getStorefilesCount());
    sf = Preconditions.checkNotNull(store.getStorefiles().iterator().next());
    assertEquals(0, sf.getReader().getEntries());
  }
}