/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.master.region;

import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertFalse;

import java.io.FileNotFoundException;
import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.Stoppable;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.master.cleaner.HFileCleaner;
import org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner;
import org.apache.hadoop.hbase.regionserver.Store;
import org.apache.hadoop.hbase.testclassification.MasterTests;
import org.apache.hadoop.hbase.testclassification.MediumTests;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
import org.apache.hadoop.hbase.util.HFileArchiveUtil;
import org.junit.jupiter.api.Tag;
import org.junit.jupiter.api.Test;

/**
 * Verifies that the master local region compacts its stores once a store reaches
 * {@code compactMin} files, that compacted-away HFiles are moved to the archive directory, and
 * that the TTL-based {@link HFileCleaner} eventually removes the archived files (and the then-empty
 * archive directories), while leaving files whose modification time is still within the TTL.
 */
@Tag(MasterTests.TAG)
@Tag(MediumTests.TAG)
public class TestMasterRegionCompaction extends MasterRegionTestBase {

  // Minimum number of store files that triggers a compaction; never reassigned, so final.
  private final int compactMin = 4;

  private HFileCleaner hfileCleaner;

  @Override
  protected void configure(MasterRegionParams params) {
    params.compactMin(compactMin);
  }

  /**
   * Starts an {@link HFileCleaner} chore (500 ms period) with a 5 s TTL for archived master local
   * store HFiles, so the test can observe both "not yet expired" and "expired" states.
   */
  @Override
  protected void postSetUp() throws IOException {
    Configuration conf = htu.getConfiguration();
    // Archived HFiles older than 5 seconds become eligible for deletion.
    conf.setLong(TimeToLiveMasterLocalStoreHFileCleaner.TTL_CONF_KEY, 5000);
    Path testDir = htu.getDataTestDir();
    FileSystem fs = testDir.getFileSystem(conf);
    Path globalArchivePath = HFileArchiveUtil.getArchivePath(conf);
    // Minimal Stoppable: the cleaner only needs stop()/isStopped() bookkeeping here.
    hfileCleaner = new HFileCleaner(500, new Stoppable() {

      private volatile boolean stopped = false;

      @Override
      public void stop(String why) {
        stopped = true;
      }

      @Override
      public boolean isStopped() {
        return stopped;
      }
    }, conf, fs, globalArchivePath, hfileCleanerPool);
    choreService.scheduleChore(hfileCleaner);
  }

  /** Returns the total number of store files across all stores of the master local region. */
  private int getStorefilesCount() {
    return region.region.getStores().stream().mapToInt(Store::getStorefilesCount).sum();
  }

  /** Asserts that {@code storeArchiveDir} currently contains exactly {@code expected} files. */
  private void assertFileCount(FileSystem fs, Path storeArchiveDir, int expected)
    throws IOException {
    FileStatus[] compactedHFiles = fs.listStatus(storeArchiveDir);
    assertEquals(expected, compactedHFiles.length);
  }

  @Test
  public void test() throws IOException, InterruptedException {
    // Create compactMin - 1 flushes touching both column families: 2 * (compactMin - 1) files.
    for (int i = 0; i < compactMin - 1; i++) {
      final int index = i;
      region.update(
        r -> r.put(new Put(Bytes.toBytes(index)).addColumn(CF1, QUALIFIER, Bytes.toBytes(index))
          .addColumn(CF2, QUALIFIER, Bytes.toBytes(index))));
      region.flush(true);
    }
    assertEquals(2 * (compactMin - 1), getStorefilesCount());
    // One more put to CF1 only; the next flush pushes CF1 to compactMin files and triggers
    // compaction of the region (both stores end up with a single file each).
    region.update(r -> r.put(new Put(Bytes.toBytes(compactMin - 1)).addColumn(CF1, QUALIFIER,
      Bytes.toBytes(compactMin - 1))));
    region.flusherAndCompactor.requestFlush();

    htu.waitFor(15000, () -> getStorefilesCount() == 2);
    Path store1ArchiveDir = HFileArchiveUtil.getStoreArchivePathForRootDir(htu.getDataTestDir(),
      region.region.getRegionInfo(), CF1);
    Path store2ArchiveDir = HFileArchiveUtil.getStoreArchivePathForRootDir(htu.getDataTestDir(),
      region.region.getRegionInfo(), CF2);
    FileSystem fs = store1ArchiveDir.getFileSystem(htu.getConfiguration());
    // after compaction, the old hfiles should have been moved to the archive directories
    htu.waitFor(15000, () -> {
      try {
        FileStatus[] fses1 = fs.listStatus(store1ArchiveDir);
        FileStatus[] fses2 = fs.listStatus(store2ArchiveDir);
        return fses1 != null && fses1.length == compactMin && fses2 != null
          && fses2.length == compactMin - 1;
      } catch (FileNotFoundException e) {
        return false;
      }
    });
    // ttl has not expired, so should not delete any files; a short fixed sleep is intentional
    // here because we are asserting that deletion does NOT happen yet.
    Thread.sleep(1000);
    FileStatus[] compactedHFiles = fs.listStatus(store1ArchiveDir);
    assertEquals(compactMin, compactedHFiles.length);
    assertFileCount(fs, store2ArchiveDir, compactMin - 1);
    Thread.sleep(2000);
    // touch one file so its TTL clock restarts; it must survive the next cleaner pass
    long currentTime = EnvironmentEdgeManager.currentTime();
    fs.setTimes(compactedHFiles[0].getPath(), currentTime, currentTime);
    // Poll instead of a fixed sleep: waiting on the cleaner chore with a bare sleep is flaky if
    // the chore run is delayed. Only the touched file should remain in store1's archive.
    htu.waitFor(15000, () -> {
      try {
        FileStatus[] fses = fs.listStatus(store1ArchiveDir);
        return fses != null && fses.length == 1;
      } catch (FileNotFoundException e) {
        return false;
      }
    });
    FileStatus[] remainingHFiles = fs.listStatus(store1ArchiveDir);
    assertEquals(compactedHFiles[0].getPath(), remainingHFiles[0].getPath());
    // store2's archived files are all expired, so its directory should be removed entirely
    htu.waitFor(15000, () -> !fs.exists(store2ArchiveDir));
    // the touched file should also be cleaned up once its refreshed TTL expires, and then the
    // cleaner deletes the parent directory since it is empty.
    htu.waitFor(15000, () -> !fs.exists(store1ArchiveDir));
  }
}