/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.regionserver;

import static org.junit.Assert.assertEquals;

import java.util.ArrayList;
import java.util.List;
import org.apache.hadoop.hbase.HBaseClassTestRule;
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner;
import org.apache.hadoop.hbase.regionserver.compactions.CompactionConfiguration;
import org.apache.hadoop.hbase.testclassification.LargeTests;
import org.apache.hadoop.hbase.testclassification.RegionServerTests;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.JVMClusterUtil;
import org.junit.After;
import org.junit.AfterClass;
import org.junit.Before;
import org.junit.BeforeClass;
import org.junit.ClassRule;
import org.junit.Test;
import org.junit.experimental.categories.Category;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
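
/**
 * Test that compacted store files which are still referenced by an open scanner are not
 * archived when the store is instantiated for region warmup; they are only archived when the
 * store is instantiated for a real region open.
 */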
@Category({ LargeTests.class, RegionServerTests.class })
public class TestNotCleanupCompactedFileWhenRegionWarmup {
  private static final Logger LOG =
      LoggerFactory.getLogger(TestNotCleanupCompactedFileWhenRegionWarmup.class);

  @ClassRule
  public static final HBaseClassTestRule CLASS_RULE =
      HBaseClassTestRule.forClass(TestNotCleanupCompactedFileWhenRegionWarmup.class);

  private static HBaseTestingUtility TEST_UTIL;
  private static Admin admin;
  private static Table table;

  private static final TableName TABLE_NAME =
      TableName.valueOf("TestNotCleanupCompactedFileWhenRegionWarmup");
  private static final byte[] ROW = Bytes.toBytes("row");
  private static final byte[] FAMILY = Bytes.toBytes("cf");
  private static final byte[] QUALIFIER = Bytes.toBytes("cq");
  private static final byte[] VALUE = Bytes.toBytes("value");

  @BeforeClass
  public static void beforeClass() throws Exception {
    TEST_UTIL = new HBaseTestingUtility();
    // Set the scanner lease to 20 minutes so the RegionServer will not expire the scanner
    // that the test deliberately leaves open
    TEST_UTIL.getConfiguration().setInt(HConstants.HBASE_CLIENT_SCANNER_TIMEOUT_PERIOD, 1200000);
    TEST_UTIL.getConfiguration()
        .setInt(CompactionConfiguration.HBASE_HSTORE_COMPACTION_MIN_KEY, 100);
    TEST_UTIL.getConfiguration().set("dfs.blocksize", "64000");
    TEST_UTIL.getConfiguration().set("dfs.namenode.fs-limits.min-block-size", "1024");
    TEST_UTIL.getConfiguration().set(TimeToLiveHFileCleaner.TTL_CONF_KEY, "0");
    TEST_UTIL.startMiniCluster(1);
    admin = TEST_UTIL.getAdmin();
  }

  @AfterClass
  public static void afterClass() throws Exception {
    TEST_UTIL.shutdownMiniCluster();
  }

  @Before
  public void before() throws Exception {
    TableDescriptorBuilder builder = TableDescriptorBuilder.newBuilder(TABLE_NAME);
    builder.setColumnFamily(ColumnFamilyDescriptorBuilder.of(FAMILY));
    admin.createTable(builder.build());
    TEST_UTIL.waitTableAvailable(TABLE_NAME);
    table = TEST_UTIL.getConnection().getTable(TABLE_NAME);
  }

  @After
  public void after() throws Exception {
    admin.disableTable(TABLE_NAME);
    admin.deleteTable(TABLE_NAME);
  }

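  /**
   * Flush three store files, keep a scanner open so the compacted files remain referenced,
   * run a major compaction, then instantiate the store twice: once in warmup mode, where the
   * compacted files must still be present, and once as a real open, where they get archived.
   */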
  @Test
  public void testRegionWarmup() throws Exception {
    List<HRegion> regions = new ArrayList<>();
    for (JVMClusterUtil.RegionServerThread rsThread : TEST_UTIL.getHBaseCluster()
        .getLiveRegionServerThreads()) {
      HRegionServer rs = rsThread.getRegionServer();
      if (rs.getOnlineTables().contains(TABLE_NAME)) {
        regions.addAll(rs.getRegions(TABLE_NAME));
      }
    }
    assertEquals("Table should only have one region", 1, regions.size());
    HRegion region = regions.get(0);
    HStore store = region.getStore(FAMILY);

    writeDataAndFlush(3, region);
    assertEquals(3, store.getStorefilesCount());

    // Open a scanner and do not close it, so the store files stay referenced
    store.getScanner(new Scan(), null, 0);
    region.compact(true);
    assertEquals(1, store.getStorefilesCount());
    // The compacted files should not be archived because the open scanner still references them
    assertEquals(3, store.getStoreEngine().getStoreFileManager().getCompactedfiles().size());

    HStore newStore = region.instantiateHStore(ColumnFamilyDescriptorBuilder.of(FAMILY), true);
    // The compacted store files must not be archived when the store is instantiated for
    // region warmup
    assertEquals(4, newStore.getStorefilesCount());

    newStore = region.instantiateHStore(ColumnFamilyDescriptorBuilder.of(FAMILY), false);
    // The compacted store files are archived when the store is instantiated for a real open
    assertEquals(1, newStore.getStorefilesCount());
  }

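  /**
   * Write 100 rows and flush the region, repeated {@code fileNum} times, so the store ends up
   * with {@code fileNum} store files.
   */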
  private void writeDataAndFlush(int fileNum, HRegion region) throws Exception {
    for (int i = 0; i < fileNum; i++) {
      for (int j = 0; j < 100; j++) {
        table.put(new Put(concat(ROW, j)).addColumn(FAMILY, QUALIFIER, concat(VALUE, j)));
      }
      region.flush(true);
    }
  }

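  /**
   * Append the given index to the byte array, e.g. ("row", 3) becomes "row-3".
   */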
  private byte[] concat(byte[] base, int index) {
    return Bytes.toBytes(Bytes.toString(base) + "-" + index);
  }
}