/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.regionserver;

import java.io.IOException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.regionserver.compactions.CompactionConfiguration;
import org.apache.hadoop.hbase.regionserver.compactions.CompactionRequestImpl;
import org.apache.hadoop.hbase.regionserver.compactions.RatioBasedCompactionPolicy;
import org.apache.hadoop.hbase.regionserver.wal.FSHLog;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.CommonFSUtils;
import org.junit.After;
import org.junit.Assert;
import org.junit.Before;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import org.apache.hbase.thirdparty.com.google.common.collect.Lists;

public class TestCompactionPolicy {
  private final static Logger LOG = LoggerFactory.getLogger(TestCompactionPolicy.class);
  protected final static HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();

  protected Configuration conf;
  protected HStore store;
  private static final String DIR =
    TEST_UTIL.getDataTestDir(TestCompactionPolicy.class.getSimpleName()).toString();
  protected static Path TEST_FILE;
  protected static final int minFiles = 3;
  protected static final int maxFiles = 5;

  protected static final long minSize = 10;
  protected static final long maxSize = 2100;

  private FSHLog hlog;
  private HRegion region;

  @Before
  public void setUp() throws Exception {
    config();
    initialize();
  }

  /**
   * Sets up the config values required by the store.
   */
  protected void config() {
    this.conf = TEST_UTIL.getConfiguration();
    this.conf.setLong(HConstants.MAJOR_COMPACTION_PERIOD, 0);
    this.conf.setInt(CompactionConfiguration.HBASE_HSTORE_COMPACTION_MIN_KEY, minFiles);
    this.conf.setInt(CompactionConfiguration.HBASE_HSTORE_COMPACTION_MAX_KEY, maxFiles);
    this.conf.setLong(CompactionConfiguration.HBASE_HSTORE_COMPACTION_MIN_SIZE_KEY, minSize);
    this.conf.setLong(CompactionConfiguration.HBASE_HSTORE_COMPACTION_MAX_SIZE_KEY, maxSize);
    this.conf.setFloat(CompactionConfiguration.HBASE_HSTORE_COMPACTION_RATIO_KEY, 1.0F);
  }

  /**
   * Sets up a store backed by a fresh region and WAL.
   * @throws IOException if the region, WAL, or store cannot be created
   */
  protected void initialize() throws IOException {
    Path basedir = new Path(DIR);
    String logName = "logs";
    Path logdir = new Path(DIR, logName);
    HColumnDescriptor hcd = new HColumnDescriptor(Bytes.toBytes("family"));
    FileSystem fs = FileSystem.get(conf);

    fs.delete(logdir, true);

    HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(Bytes.toBytes("table")));
    htd.addFamily(hcd);
    HRegionInfo info = new HRegionInfo(htd.getTableName(), null, null, false);

    hlog = new FSHLog(fs, basedir, logName, conf);
    ChunkCreator.initialize(MemStoreLAB.CHUNK_SIZE_DEFAULT, false, 0, 0,
      0, null, MemStoreLAB.INDEX_CHUNK_SIZE_PERCENTAGE_DEFAULT);
    region = HRegion.createHRegion(info, basedir, conf, htd, hlog);
    region.close();
    Path tableDir = CommonFSUtils.getTableDir(basedir, htd.getTableName());
    region = new HRegion(tableDir, hlog, fs, conf, info, htd, null);
    store = new HStore(region, hcd, conf, false);

    TEST_FILE = region.getRegionFileSystem().createTempName();
    fs.createNewFile(TEST_FILE);
  }

  @After
  public void tearDown() throws IOException {
    // Close both the region and the WAL, remembering the last exception so that
    // neither close is skipped before any failure is rethrown.
    IOException ex = null;
    try {
      region.close();
    } catch (IOException e) {
      LOG.warn("Caught Exception", e);
      ex = e;
    }
    try {
      hlog.close();
    } catch (IOException e) {
      LOG.warn("Caught Exception", e);
      ex = e;
    }
    if (ex != null) {
      throw ex;
    }
  }

  /** Boxes a varargs array of longs into an ArrayList. */
  ArrayList<Long> toArrayList(long... numbers) {
    ArrayList<Long> result = new ArrayList<>();
    for (long i : numbers) {
      result.add(i);
    }
    return result;
  }

  List<HStoreFile> sfCreate(long... sizes) throws IOException {
    ArrayList<Long> ageInDisk = new ArrayList<>();
    for (int i = 0; i < sizes.length; i++) {
      ageInDisk.add(0L);
    }
    return sfCreate(toArrayList(sizes), ageInDisk);
  }

  List<HStoreFile> sfCreate(ArrayList<Long> sizes, ArrayList<Long> ageInDisk) throws IOException {
    return sfCreate(false, sizes, ageInDisk);
  }

  List<HStoreFile> sfCreate(boolean isReference, long... sizes) throws IOException {
    ArrayList<Long> ageInDisk = new ArrayList<>(sizes.length);
    for (int i = 0; i < sizes.length; i++) {
      ageInDisk.add(0L);
    }
    return sfCreate(isReference, toArrayList(sizes), ageInDisk);
  }

  /** Creates one mock store file per entry in sizes, with the matching age on disk. */
  List<HStoreFile> sfCreate(boolean isReference, ArrayList<Long> sizes, ArrayList<Long> ageInDisk)
    throws IOException {
    List<HStoreFile> ret = Lists.newArrayList();
    for (int i = 0; i < sizes.size(); i++) {
      ret.add(new MockHStoreFile(TEST_UTIL, TEST_FILE, sizes.get(i), ageInDisk.get(i), isReference,
        i));
    }
    return ret;
  }

  /** Returns the reader lengths of the given store files, in order. */
  long[] getSizes(List<HStoreFile> sfList) {
    long[] aNums = new long[sfList.size()];
    for (int i = 0; i < sfList.size(); ++i) {
      aNums[i] = sfList.get(i).getReader().length();
    }
    return aNums;
  }

  void compactEquals(List<HStoreFile> candidates, long... expected) throws IOException {
    compactEquals(candidates, false, false, expected);
  }

  void compactEquals(List<HStoreFile> candidates, boolean forcemajor, long... expected)
    throws IOException {
    compactEquals(candidates, forcemajor, false, expected);
  }

  /**
   * Runs the store's ratio-based compaction policy over the candidates and asserts that the
   * sizes of the selected files match the expected sizes, in order.
   */
  void compactEquals(List<HStoreFile> candidates, boolean forcemajor, boolean isOffPeak,
    long... expected) throws IOException {
    store.forceMajor = forcemajor;
    // Test default compactions
    CompactionRequestImpl result =
      ((RatioBasedCompactionPolicy) store.storeEngine.getCompactionPolicy()).selectCompaction(
        candidates, new ArrayList<>(), false, isOffPeak, forcemajor);
    List<HStoreFile> actual = new ArrayList<>(result.getFiles());
    if (isOffPeak && !forcemajor) {
      Assert.assertTrue(result.isOffPeak());
    }
    Assert.assertEquals(Arrays.toString(expected), Arrays.toString(getSizes(actual)));
    store.forceMajor = false;
  }
}