/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.master.region;

import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;

import java.io.FileNotFoundException;
import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseClassTestRule;
import org.apache.hadoop.hbase.Stoppable;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.master.cleaner.HFileCleaner;
import org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner;
import org.apache.hadoop.hbase.regionserver.Store;
import org.apache.hadoop.hbase.testclassification.MasterTests;
import org.apache.hadoop.hbase.testclassification.MediumTests;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.HFileArchiveUtil;
import org.junit.ClassRule;
import org.junit.Test;
import org.junit.experimental.categories.Category;

@Category({ MasterTests.class, MediumTests.class })
public class TestMasterRegionCompaction extends MasterRegionTestBase {

  @ClassRule
  public static final HBaseClassTestRule CLASS_RULE =
    HBaseClassTestRule.forClass(TestMasterRegionCompaction.class);

  private int compactMin = 4;

  private HFileCleaner hfileCleaner;

  @Override
  protected void configure(MasterRegionParams params) {
    params.compactMin(compactMin);
  }

  @Override
  protected void postSetUp() throws IOException {
    Configuration conf = htu.getConfiguration();
    // use a short TTL (5 seconds) so archived hfiles become eligible for deletion while the
    // test is still running
    conf.setLong(TimeToLiveMasterLocalStoreHFileCleaner.TTL_CONF_KEY, 5000);
    Path testDir = htu.getDataTestDir();
    FileSystem fs = testDir.getFileSystem(conf);
    Path globalArchivePath = HFileArchiveUtil.getArchivePath(conf);
    // run the cleaner chore every 500ms, with a Stoppable that only stops when told to
    hfileCleaner = new HFileCleaner(500, new Stoppable() {

      private volatile boolean stopped = false;

      @Override
      public void stop(String why) {
        stopped = true;
      }

      @Override
      public boolean isStopped() {
        return stopped;
      }
    }, conf, fs, globalArchivePath, cleanerPool);
    choreService.scheduleChore(hfileCleaner);
  }

  private int getStorefilesCount() {
    return region.region.getStores().stream().mapToInt(Store::getStorefilesCount).sum();
  }

  private void assertFileCount(FileSystem fs, Path storeArchiveDir, int expected)
    throws IOException {
    FileStatus[] compactedHFiles = fs.listStatus(storeArchiveDir);
    assertEquals(expected, compactedHFiles.length);
  }
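
  /**
   * Flushes enough store files to trigger a compaction, verifies that the compacted-away hfiles
   * are moved into the per-store archive directories, and then verifies that the TTL-based
   * cleaner chore deletes them, and finally the then-empty archive directories, once the
   * configured TTL has expired.
   */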
  @Test
  public void test() throws IOException, InterruptedException {
    // each flush writes one store file per family, leaving both CF1 and CF2 with
    // compactMin - 1 files, one short of the compaction threshold
    for (int i = 0; i < compactMin - 1; i++) {
      final int index = i;
      region.update(
        r -> r.put(new Put(Bytes.toBytes(index)).addColumn(CF1, QUALIFIER, Bytes.toBytes(index))
          .addColumn(CF2, QUALIFIER, Bytes.toBytes(index))));
      region.flush(true);
    }
    assertEquals(2 * (compactMin - 1), getStorefilesCount());
    // one more put and flush brings CF1 up to compactMin store files, triggering a compaction;
    // afterwards each family should be left with a single store file
    region.update(r -> r.put(new Put(Bytes.toBytes(compactMin - 1)).addColumn(CF1, QUALIFIER,
      Bytes.toBytes(compactMin - 1))));
    region.flusherAndCompactor.requestFlush();
    htu.waitFor(15000, () -> getStorefilesCount() == 2);
    Path store1ArchiveDir = HFileArchiveUtil.getStoreArchivePathForRootDir(htu.getDataTestDir(),
      region.region.getRegionInfo(), CF1);
    Path store2ArchiveDir = HFileArchiveUtil.getStoreArchivePathForRootDir(htu.getDataTestDir(),
      region.region.getRegionInfo(), CF2);
    FileSystem fs = store1ArchiveDir.getFileSystem(htu.getConfiguration());
    // after compaction, the old hfiles should have been archived
    htu.waitFor(15000, () -> {
      try {
        FileStatus[] fses1 = fs.listStatus(store1ArchiveDir);
        FileStatus[] fses2 = fs.listStatus(store2ArchiveDir);
        return fses1 != null && fses1.length == compactMin && fses2 != null &&
          fses2.length == compactMin - 1;
      } catch (FileNotFoundException e) {
        return false;
      }
    });
    // ttl has not expired yet, so the cleaner should not have deleted any files
    Thread.sleep(1000);
    FileStatus[] compactedHFiles = fs.listStatus(store1ArchiveDir);
    assertEquals(compactMin, compactedHFiles.length);
    assertFileCount(fs, store2ArchiveDir, compactMin - 1);
    Thread.sleep(2000);
    // touch one file to refresh its modification time, which restarts its ttl
    long currentTime = System.currentTimeMillis();
    fs.setTimes(compactedHFiles[0].getPath(), currentTime, currentTime);
    Thread.sleep(3000);
    // only the touched file is still there after clean up
    FileStatus[] remainingHFiles = fs.listStatus(store1ArchiveDir);
    assertEquals(1, remainingHFiles.length);
    assertEquals(compactedHFiles[0].getPath(), remainingHFiles[0].getPath());
    assertFalse(fs.exists(store2ArchiveDir));
    Thread.sleep(6000);
    // the touched file should also be cleaned up and then the cleaner will delete the parent
    // directory since it is empty
    assertFalse(fs.exists(store1ArchiveDir));
  }
}