/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.master.region;

import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;

import java.io.FileNotFoundException;
import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseClassTestRule;
import org.apache.hadoop.hbase.Stoppable;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.master.cleaner.HFileCleaner;
import org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner;
import org.apache.hadoop.hbase.regionserver.Store;
import org.apache.hadoop.hbase.testclassification.MasterTests;
import org.apache.hadoop.hbase.testclassification.MediumTests;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
import org.apache.hadoop.hbase.util.HFileArchiveUtil;
import org.junit.ClassRule;
import org.junit.Test;
import org.junit.experimental.categories.Category;

@Category({ MasterTests.class, MediumTests.class })
public class TestMasterRegionCompaction extends MasterRegionTestBase {

  @ClassRule
  public static final HBaseClassTestRule CLASS_RULE =
    HBaseClassTestRule.forClass(TestMasterRegionCompaction.class);

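  // trigger a compaction once a store accumulates this many hfiles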
  private int compactMin = 4;

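  // cleaner chore that removes expired hfiles from the archive directory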
  private HFileCleaner hfileCleaner;

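  // keep the compaction threshold small so a handful of flushes is enough to trigger a compaction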
  @Override
  protected void configure(MasterRegionParams params) {
    params.compactMin(compactMin);
  }

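  // give archived hfiles a short TTL and start an HFileCleaner chore so the test can observe
  // expired files being deleted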
  @Override
  protected void postSetUp() throws IOException {
    Configuration conf = htu.getConfiguration();
    // archived hfiles of the master local store expire 5 seconds after their last modification
    conf.setLong(TimeToLiveMasterLocalStoreHFileCleaner.TTL_CONF_KEY, 5000);
    Path testDir = htu.getDataTestDir();
    FileSystem fs = testDir.getFileSystem(conf);
    Path globalArchivePath = HFileArchiveUtil.getArchivePath(conf);
    // run the cleaner every 500ms so expired files are removed soon after the TTL elapses
    hfileCleaner = new HFileCleaner(500, new Stoppable() {

      private volatile boolean stopped = false;

      @Override
      public void stop(String why) {
        stopped = true;
      }

      @Override
      public boolean isStopped() {
        return stopped;
      }
    }, conf, fs, globalArchivePath, hfileCleanerPool);
    choreService.scheduleChore(hfileCleaner);
  }

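  // total number of store files across all stores of the master local region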
  private int getStorefilesCount() {
    return region.region.getStores().stream().mapToInt(Store::getStorefilesCount).sum();
  }

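  // assert the number of hfiles currently sitting in the given store archive directory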
  private void assertFileCount(FileSystem fs, Path storeArchiveDir, int expected)
    throws IOException {
    FileStatus[] files = fs.listStatus(storeArchiveDir);
    assertEquals(expected, files.length);
  }

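  // flush until a compaction is triggered, verify the compacted hfiles are moved to the archive
  // directory, then verify the TTL cleaner removes them once they expire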
  @Test
  public void test() throws IOException, InterruptedException {
    // write and flush compactMin - 1 times so each column family has compactMin - 1 store files,
    // one below the compaction threshold
    for (int i = 0; i < compactMin - 1; i++) {
      final int index = i;
      region.update(
        r -> r.put(new Put(Bytes.toBytes(index)).addColumn(CF1, QUALIFIER, Bytes.toBytes(index))
          .addColumn(CF2, QUALIFIER, Bytes.toBytes(index))));
      region.flush(true);
    }
    assertEquals(2 * (compactMin - 1), getStorefilesCount());
    // one more write to CF1 only; the next flush pushes CF1 to compactMin store files and
    // triggers a compaction
    region.update(r -> r.put(new Put(Bytes.toBytes(compactMin - 1)).addColumn(CF1, QUALIFIER,
      Bytes.toBytes(compactMin - 1))));
    region.flusherAndCompactor.requestFlush();
    // after compaction each store should be left with a single store file
    htu.waitFor(15000, () -> getStorefilesCount() == 2);
    Path store1ArchiveDir = HFileArchiveUtil.getStoreArchivePathForRootDir(htu.getDataTestDir(),
      region.region.getRegionInfo(), CF1);
    Path store2ArchiveDir = HFileArchiveUtil.getStoreArchivePathForRootDir(htu.getDataTestDir(),
      region.region.getRegionInfo(), CF2);
    FileSystem fs = store1ArchiveDir.getFileSystem(htu.getConfiguration());
    // after compaction, the old hfiles should have been moved to the archive directory
    htu.waitFor(15000, () -> {
      try {
        FileStatus[] fses1 = fs.listStatus(store1ArchiveDir);
        FileStatus[] fses2 = fs.listStatus(store2ArchiveDir);
        return fses1 != null && fses1.length == compactMin && fses2 != null
          && fses2.length == compactMin - 1;
      } catch (FileNotFoundException e) {
        return false;
      }
    });
    // the 5 second TTL has not expired yet, so the cleaner should not delete any files
    Thread.sleep(1000);
    FileStatus[] compactedHFiles = fs.listStatus(store1ArchiveDir);
    assertEquals(compactMin, compactedHFiles.length);
    assertFileCount(fs, store2ArchiveDir, compactMin - 1);
    Thread.sleep(2000);
    // touch one file to reset its modification time, which restarts its TTL countdown
    long currentTime = EnvironmentEdgeManager.currentTime();
    fs.setTimes(compactedHFiles[0].getPath(), currentTime, currentTime);
    Thread.sleep(3000);
    // only the touched file is still there after clean up; the untouched files are now past
    // their TTL
    FileStatus[] remainingHFiles = fs.listStatus(store1ArchiveDir);
    assertEquals(1, remainingHFiles.length);
    assertEquals(compactedHFiles[0].getPath(), remainingHFiles[0].getPath());
    assertFalse(fs.exists(store2ArchiveDir));
    Thread.sleep(6000);
    // the touched file should also be cleaned up and then the cleaner will delete the parent
    // directory since it is empty
    assertFalse(fs.exists(store1ArchiveDir));
  }
}