/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.regionserver;

import java.io.IOException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseTestingUtil;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.RegionInfo;
import org.apache.hadoop.hbase.client.RegionInfoBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.regionserver.compactions.CompactionConfiguration;
import org.apache.hadoop.hbase.regionserver.compactions.CompactionRequestImpl;
import org.apache.hadoop.hbase.regionserver.compactions.RatioBasedCompactionPolicy;
import org.apache.hadoop.hbase.regionserver.storefiletracker.StoreFileTrackerForTest;
import org.apache.hadoop.hbase.regionserver.wal.FSHLog;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.CommonFSUtils;
import org.junit.After;
import org.junit.Assert;
import org.junit.Before;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import org.apache.hbase.thirdparty.com.google.common.collect.Lists;

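/**
 * Base fixture for compaction policy tests: stands up a region, WAL and store so that compaction
 * selection can be exercised against mock store files of chosen sizes.
 */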
public class TestCompactionPolicy {
  private final static Logger LOG = LoggerFactory.getLogger(TestCompactionPolicy.class);
  protected final static HBaseTestingUtil TEST_UTIL = new HBaseTestingUtil();

  protected Configuration conf;
  protected HStore store;
  private static final String DIR =
    TEST_UTIL.getDataTestDir(TestCompactionPolicy.class.getSimpleName()).toString();
  protected static Path TEST_FILE;
  protected static final int minFiles = 3;
  protected static final int maxFiles = 5;

  protected static final long minSize = 10;
  protected static final long maxSize = 2100;

  private FSHLog hlog;
  private HRegion region;

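  /**
   * Runs {@link #config()} and {@link #initialize()} before each test.
   */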
  @Before
  public void setUp() throws Exception {
    config();
    initialize();
  }

  /**
   * Set up the configuration values the store needs for compaction selection.
   */
  protected void config() {
    this.conf = TEST_UTIL.getConfiguration();
    this.conf.setLong(HConstants.MAJOR_COMPACTION_PERIOD, 0);
    this.conf.setInt(CompactionConfiguration.HBASE_HSTORE_COMPACTION_MIN_KEY, minFiles);
    this.conf.setInt(CompactionConfiguration.HBASE_HSTORE_COMPACTION_MAX_KEY, maxFiles);
    this.conf.setLong(CompactionConfiguration.HBASE_HSTORE_COMPACTION_MIN_SIZE_KEY, minSize);
    this.conf.setLong(CompactionConfiguration.HBASE_HSTORE_COMPACTION_MAX_SIZE_KEY, maxSize);
    this.conf.setFloat(CompactionConfiguration.HBASE_HSTORE_COMPACTION_RATIO_KEY, 1.0F);
  }

  /**
   * Set up a region and a store against which compaction selection can run.
   * @throws IOException if the region or store cannot be initialized
   */
  protected void initialize() throws IOException {
    Path basedir = new Path(DIR);
    String logName = "logs";
    Path logdir = new Path(DIR, logName);
    ColumnFamilyDescriptor familyDescriptor =
      ColumnFamilyDescriptorBuilder.of(Bytes.toBytes("family"));
    FileSystem fs = FileSystem.get(conf);

    fs.delete(logdir, true);

    TableDescriptor tableDescriptor =
      TableDescriptorBuilder.newBuilder(TableName.valueOf(Bytes.toBytes("table")))
        .setColumnFamily(familyDescriptor).build();
    RegionInfo info = RegionInfoBuilder.newBuilder(tableDescriptor.getTableName()).build();

    fs.mkdirs(new Path(basedir, logName));
    hlog = new FSHLog(fs, basedir, logName, conf);
    hlog.init();
    ChunkCreator.initialize(MemStoreLAB.CHUNK_SIZE_DEFAULT, false, 0, 0, 0, null,
      MemStoreLAB.INDEX_CHUNK_SIZE_PERCENTAGE_DEFAULT);
    // Create the region on disk, then reopen it uninitialized so the store can be built by hand.
    region = HRegion.createHRegion(info, basedir, conf, tableDescriptor, hlog);
    region.close();
    Path tableDir = CommonFSUtils.getTableDir(basedir, tableDescriptor.getTableName());
    region = new HRegion(tableDir, hlog, fs, conf, info, tableDescriptor, null);

    store = new HStore(region, familyDescriptor, conf, false);

    // All mock store files created by sfCreate share this single backing file on disk.
    TEST_FILE = region.getRegionFileSystem().createTempName();
    fs.createNewFile(TEST_FILE);
  }

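  /**
   * Closes the region and the WAL. Both closes are attempted even if the first one fails, and the
   * last IOException caught is rethrown so cleanup failures are not silently swallowed.
   */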
  @After
  public void tearDown() throws IOException {
    IOException ex = null;
    try {
      region.close();
    } catch (IOException e) {
      LOG.warn("Caught Exception", e);
      ex = e;
    }
    try {
      hlog.close();
    } catch (IOException e) {
      LOG.warn("Caught Exception", e);
      ex = e;
    }
    if (ex != null) {
      throw ex;
    }
  }

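  /**
   * Boxes the given primitive longs into an ArrayList.
   */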
  ArrayList<Long> toArrayList(long... numbers) {
    ArrayList<Long> result = new ArrayList<>();
    for (long i : numbers) {
      result.add(i);
    }
    return result;
  }

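  /**
   * Creates mock store files of the given sizes, each with an on-disk age of zero.
   */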
  List<HStoreFile> sfCreate(long... sizes) throws IOException {
    ArrayList<Long> ageInDisk = new ArrayList<>();
    for (int i = 0; i < sizes.length; i++) {
      ageInDisk.add(0L);
    }
    return sfCreate(toArrayList(sizes), ageInDisk);
  }

  List<HStoreFile> sfCreate(ArrayList<Long> sizes, ArrayList<Long> ageInDisk) throws IOException {
    return sfCreate(false, sizes, ageInDisk);
  }

  List<HStoreFile> sfCreate(boolean isReference, long... sizes) throws IOException {
    ArrayList<Long> ageInDisk = new ArrayList<>(sizes.length);
    for (int i = 0; i < sizes.length; i++) {
      ageInDisk.add(0L);
    }
    return sfCreate(isReference, toArrayList(sizes), ageInDisk);
  }

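  /**
   * Creates one MockHStoreFile per entry in {@code sizes}, all backed by TEST_FILE and optionally
   * marked as reference files.
   */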
  List<HStoreFile> sfCreate(boolean isReference, ArrayList<Long> sizes, ArrayList<Long> ageInDisk)
    throws IOException {
    List<HStoreFile> ret = Lists.newArrayList();
    StoreFileTrackerForTest storeFileTrackerForTest =
      new StoreFileTrackerForTest(conf, true, store.getStoreContext());
    for (int i = 0; i < sizes.size(); i++) {
      ret.add(new MockHStoreFile(TEST_UTIL, TEST_FILE, sizes.get(i), ageInDisk.get(i), isReference,
        i, storeFileTrackerForTest));
    }
    return ret;
  }

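  /**
   * Returns the reader-reported lengths of the given store files, in list order.
   */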
  long[] getSizes(List<HStoreFile> sfList) {
    long[] aNums = new long[sfList.size()];
    for (int i = 0; i < sfList.size(); ++i) {
      aNums[i] = sfList.get(i).getReader().length();
    }
    return aNums;
  }

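  /**
   * Runs the store's ratio-based compaction policy over the candidates and asserts that the
   * selected files have exactly the expected sizes, in order. For example, since files below
   * minSize (10 here) bypass the ratio check, a call such as
   * {@code compactEquals(sfCreate(7, 1, 1), 7, 1, 1)} is expected to select all three files.
   */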
  void compactEquals(List<HStoreFile> candidates, long... expected) throws IOException {
    compactEquals(candidates, false, false, expected);
  }

  void compactEquals(List<HStoreFile> candidates, boolean forcemajor, long... expected)
    throws IOException {
    compactEquals(candidates, forcemajor, false, expected);
  }

  void compactEquals(List<HStoreFile> candidates, boolean forcemajor, boolean isOffPeak,
    long... expected) throws IOException {
    store.forceMajor = forcemajor;
    // Run the default (ratio-based) compaction selection.
    CompactionRequestImpl result =
      ((RatioBasedCompactionPolicy) store.storeEngine.getCompactionPolicy())
        .selectCompaction(candidates, new ArrayList<>(), false, isOffPeak, forcemajor);
    List<HStoreFile> actual = new ArrayList<>(result.getFiles());
    if (isOffPeak && !forcemajor) {
      Assert.assertTrue(result.isOffPeak());
    }
    Assert.assertEquals(Arrays.toString(expected), Arrays.toString(getSizes(actual)));
    store.forceMajor = false;
  }
}