/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.hadoop.hbase.regionserver;

import java.io.IOException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.regionserver.compactions.CompactionConfiguration;
import org.apache.hadoop.hbase.regionserver.compactions.CompactionRequestImpl;
import org.apache.hadoop.hbase.regionserver.compactions.RatioBasedCompactionPolicy;
import org.apache.hadoop.hbase.regionserver.wal.FSHLog;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.CommonFSUtils;
import org.junit.After;
import org.junit.Assert;
import org.junit.Before;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import org.apache.hbase.thirdparty.com.google.common.collect.Lists;

public class TestCompactionPolicy {
  private static final Logger LOG = LoggerFactory.getLogger(TestCompactionPolicy.class);
  protected static final HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();

  protected Configuration conf;
  protected HStore store;
  private static final String DIR = TEST_UTIL.getDataTestDir(
    TestCompactionPolicy.class.getSimpleName()).toString();
  protected static Path TEST_FILE;
  // Compaction policy knobs applied in config().
  protected static final int minFiles = 3;
  protected static final int maxFiles = 5;

  protected static final long minSize = 10;
  protected static final long maxSize = 2100;

  private FSHLog hlog;
  private HRegion region;

  @Before
  public void setUp() throws Exception {
    config();
    initialize();
  }

  /**
   * Set up the configuration values the store under test requires.
   */
  protected void config() {
    this.conf = TEST_UTIL.getConfiguration();
    this.conf.setLong(HConstants.MAJOR_COMPACTION_PERIOD, 0);
    this.conf.setInt(CompactionConfiguration.HBASE_HSTORE_COMPACTION_MIN_KEY, minFiles);
    this.conf.setInt(CompactionConfiguration.HBASE_HSTORE_COMPACTION_MAX_KEY, maxFiles);
    this.conf.setLong(CompactionConfiguration.HBASE_HSTORE_COMPACTION_MIN_SIZE_KEY, minSize);
    this.conf.setLong(CompactionConfiguration.HBASE_HSTORE_COMPACTION_MAX_SIZE_KEY, maxSize);
    this.conf.setFloat(CompactionConfiguration.HBASE_HSTORE_COMPACTION_RATIO_KEY, 1.0F);
  }

  /**
   * Sets up an HStore backed by a freshly created test region and WAL.
   * @throws IOException if the region or store cannot be created
   */
  protected void initialize() throws IOException {
    Path basedir = new Path(DIR);
    String logName = "logs";
    Path logdir = new Path(DIR, logName);
    HColumnDescriptor hcd = new HColumnDescriptor(Bytes.toBytes("family"));
    FileSystem fs = FileSystem.get(conf);

    fs.delete(logdir, true);

    HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(Bytes.toBytes("table")));
    htd.addFamily(hcd);
    HRegionInfo info = new HRegionInfo(htd.getTableName(), null, null, false);

    hlog = new FSHLog(fs, basedir, logName, conf);
    ChunkCreator.initialize(MemStoreLABImpl.CHUNK_SIZE_DEFAULT, false, 0, 0, 0, null);
    // Create the region on disk, then reopen it without initializing it so the test can
    // construct the store itself.
    region = HRegion.createHRegion(info, basedir, conf, htd, hlog);
    region.close();
    Path tableDir = CommonFSUtils.getTableDir(basedir, htd.getTableName());
    region = new HRegion(tableDir, hlog, fs, conf, info, htd, null);
    store = new HStore(region, hcd, conf, false);

    // Every mock store file created by sfCreate() points at this one real file on disk.
    TEST_FILE = region.getRegionFileSystem().createTempName();
    fs.createNewFile(TEST_FILE);
  }

  @After
  public void tearDown() throws IOException {
    IOException ex = null;
    try {
      region.close();
    } catch (IOException e) {
      LOG.warn("Caught Exception", e);
      ex = e;
    }
    try {
      hlog.close();
    } catch (IOException e) {
      LOG.warn("Caught Exception", e);
      ex = e;
    }
    if (ex != null) {
      throw ex;
    }
  }

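  /**
   * Boxes a varargs list of longs into an ArrayList.
   */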
  ArrayList<Long> toArrayList(long... numbers) {
    ArrayList<Long> result = new ArrayList<>();
    for (long i : numbers) {
      result.add(i);
    }
    return result;
  }

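  /**
   * Creates non-reference mock store files of the given sizes, each with an age-in-disk
   * of zero.
   */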
  List<HStoreFile> sfCreate(long... sizes) throws IOException {
    ArrayList<Long> ageInDisk = new ArrayList<>();
    for (int i = 0; i < sizes.length; i++) {
      ageInDisk.add(0L);
    }
    return sfCreate(toArrayList(sizes), ageInDisk);
  }

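  /**
   * Creates non-reference mock store files with the given sizes and ages.
   */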
  List<HStoreFile> sfCreate(ArrayList<Long> sizes, ArrayList<Long> ageInDisk) throws IOException {
    return sfCreate(false, sizes, ageInDisk);
  }

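  /**
   * Creates mock store files of the given sizes, optionally marked as references, each
   * with an age-in-disk of zero.
   */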
  List<HStoreFile> sfCreate(boolean isReference, long... sizes) throws IOException {
    ArrayList<Long> ageInDisk = new ArrayList<>(sizes.length);
    for (int i = 0; i < sizes.length; i++) {
      ageInDisk.add(0L);
    }
    return sfCreate(isReference, toArrayList(sizes), ageInDisk);
  }

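  /**
   * Creates one MockHStoreFile per entry of {@code sizes}, pairing it with the
   * corresponding entry of {@code ageInDisk}.
   */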
  List<HStoreFile> sfCreate(boolean isReference, ArrayList<Long> sizes, ArrayList<Long> ageInDisk)
      throws IOException {
    List<HStoreFile> ret = Lists.newArrayList();
    for (int i = 0; i < sizes.size(); i++) {
      ret.add(new MockHStoreFile(TEST_UTIL, TEST_FILE, sizes.get(i), ageInDisk.get(i), isReference,
          i));
    }
    return ret;
  }

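  /**
   * Returns the reader-reported lengths of the given store files, in order.
   */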
  long[] getSizes(List<HStoreFile> sfList) {
    long[] aNums = new long[sfList.size()];
    for (int i = 0; i < sfList.size(); ++i) {
      aNums[i] = sfList.get(i).getReader().length();
    }
    return aNums;
  }

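  /**
   * Asserts that minor, peak-hours compaction selection over {@code candidates} picks
   * exactly the files with the {@code expected} sizes.
   */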
  void compactEquals(List<HStoreFile> candidates, long... expected) throws IOException {
    compactEquals(candidates, false, false, expected);
  }

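  /**
   * Same as above, but optionally forcing a major compaction.
   */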
  void compactEquals(List<HStoreFile> candidates, boolean forcemajor, long... expected)
      throws IOException {
    compactEquals(candidates, forcemajor, false, expected);
  }

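  /**
   * Runs the store's ratio-based compaction policy over {@code candidates} and asserts
   * that the selected files have exactly the {@code expected} sizes, in order.
   */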
  void compactEquals(List<HStoreFile> candidates, boolean forcemajor, boolean isOffPeak,
      long... expected) throws IOException {
    store.forceMajor = forcemajor;
    // Run the default ratio-based compaction policy over the candidates.
    CompactionRequestImpl result =
        ((RatioBasedCompactionPolicy) store.storeEngine.getCompactionPolicy()).selectCompaction(
          candidates, new ArrayList<>(), false, isOffPeak, forcemajor);
    List<HStoreFile> actual = new ArrayList<>(result.getFiles());
    if (isOffPeak && !forcemajor) {
      Assert.assertTrue(result.isOffPeak());
    }
    Assert.assertEquals(Arrays.toString(expected), Arrays.toString(getSizes(actual)));
    store.forceMajor = false;
  }
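
  // Illustrative usage (the sizes here are made up, not taken from any real test): with
  // the compaction ratio pinned to 1.0 in config(), a file is selected only if it is no
  // larger than the combined size of the smaller files after it, so a subclass test
  // might assert something like:
  //
  //   compactEquals(sfCreate(100, 50, 23, 12, 12), 23, 12, 12);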
}