/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.regionserver;

import static org.junit.Assert.assertEquals;

import java.util.Collection;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseClassTestRule;
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.snapshot.SnapshotTestingUtils;
import org.apache.hadoop.hbase.testclassification.MediumTests;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.CommonFSUtils;
import org.junit.After;
import org.junit.AfterClass;
import org.junit.Assert;
import org.junit.BeforeClass;
import org.junit.ClassRule;
import org.junit.Test;
import org.junit.experimental.categories.Category;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

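/**
 * Tests for the region server's {@link CompactSplit} service: the large compaction, small
 * compaction and split thread pool sizes should be read from configuration and be resizable
 * through an online configuration change, and flushes should keep working when compaction is
 * disabled on a table.
 */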
@Category(MediumTests.class)
public class TestCompactSplitThread {

  @ClassRule
  public static final HBaseClassTestRule CLASS_RULE =
      HBaseClassTestRule.forClass(TestCompactSplitThread.class);

  private static final Logger LOG = LoggerFactory.getLogger(TestCompactSplitThread.class);
  private static final HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
  private final TableName tableName = TableName.valueOf(getClass().getSimpleName());
  private final byte[] family = Bytes.toBytes("f");
  private static final int NUM_RS = 1;
  private static final int blockingStoreFiles = 3;
  private static Path rootDir;
  private static FileSystem fs;

  /**
   * Set up the configuration and start the mini cluster.
   */
  @BeforeClass
  public static void setupCluster() throws Exception {
    setupConf(TEST_UTIL.getConfiguration());
    TEST_UTIL.startMiniCluster(NUM_RS);
    fs = TEST_UTIL.getDFSCluster().getFileSystem();
    rootDir = TEST_UTIL.getMiniHBaseCluster().getMaster().getMasterFileSystem().getRootDir();
  }

  private static void setupConf(Configuration conf) {
    // disable the region server info UI
    conf.setInt("hbase.regionserver.info.port", -1);
    // make sure we get a compaction when doing a load, but keep some files
    // around in the store
    conf.setInt("hbase.hstore.compaction.min", 2);
    conf.setInt("hbase.hstore.compactionThreshold", 5);
    // change the flush size to a small amount, regulating the number of store files
    conf.setInt("hbase.hregion.memstore.flush.size", 25000);

    // block writes if we get to blockingStoreFiles store files
    conf.setInt("hbase.hstore.blockingStoreFiles", blockingStoreFiles);
    // set the initial thread pool sizes for large/small compactions and splits
    conf.setInt(CompactSplit.LARGE_COMPACTION_THREADS, 3);
    conf.setInt(CompactSplit.SMALL_COMPACTION_THREADS, 4);
    conf.setInt(CompactSplit.SPLIT_THREADS, 5);
  }
094
095  @After
096  public void tearDown() throws Exception {
097    TEST_UTIL.deleteTable(tableName);
098  }
099
100  @AfterClass
101  public static void cleanupTest() throws Exception {
102    try {
103      TEST_UTIL.shutdownMiniCluster();
104    } catch (Exception e) {
105      // NOOP;
106    }
107  }
108
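  /**
   * Check that the compaction/split thread pool sizes are read from configuration and that
   * {@code onConfigurationChange} resizes the pools online, both to larger and to smaller values.
   */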
  @Test
  public void testThreadPoolSizeTuning() throws Exception {
    Configuration conf = TEST_UTIL.getConfiguration();
    Connection conn = ConnectionFactory.createConnection(conf);
    try {
      HTableDescriptor htd = new HTableDescriptor(tableName);
      htd.addFamily(new HColumnDescriptor(family));
      htd.setCompactionEnabled(false);
      TEST_UTIL.getAdmin().createTable(htd);
      TEST_UTIL.waitTableAvailable(tableName);
      HRegionServer regionServer = TEST_UTIL.getRSForFirstRegionInTable(tableName);

      // check the initial thread pool sizes taken from configuration
      assertEquals(3, regionServer.compactSplitThread.getLargeCompactionThreadNum());
      assertEquals(4, regionServer.compactSplitThread.getSmallCompactionThreadNum());
      assertEquals(5, regionServer.compactSplitThread.getSplitThreadNum());

      // increase the configured sizes and do an online update
      conf.setInt(CompactSplit.LARGE_COMPACTION_THREADS, 4);
      conf.setInt(CompactSplit.SMALL_COMPACTION_THREADS, 5);
      conf.setInt(CompactSplit.SPLIT_THREADS, 6);
      try {
        regionServer.compactSplitThread.onConfigurationChange(conf);
      } catch (IllegalArgumentException iae) {
        Assert.fail("Online update with larger thread pool sizes failed!");
      }

      // check again after the online update
      assertEquals(4, regionServer.compactSplitThread.getLargeCompactionThreadNum());
      assertEquals(5, regionServer.compactSplitThread.getSmallCompactionThreadNum());
      assertEquals(6, regionServer.compactSplitThread.getSplitThreadNum());

      // decrease the configured sizes and do an online update
      conf.setInt(CompactSplit.LARGE_COMPACTION_THREADS, 2);
      conf.setInt(CompactSplit.SMALL_COMPACTION_THREADS, 3);
      conf.setInt(CompactSplit.SPLIT_THREADS, 4);
      try {
        regionServer.compactSplitThread.onConfigurationChange(conf);
      } catch (IllegalArgumentException iae) {
        Assert.fail("Online update with smaller thread pool sizes failed!");
      }

      // check again after the online update
      assertEquals(2, regionServer.compactSplitThread.getLargeCompactionThreadNum());
      assertEquals(3, regionServer.compactSplitThread.getSmallCompactionThreadNum());
      assertEquals(4, regionServer.compactSplitThread.getSplitThreadNum());
    } finally {
      conn.close();
    }
  }

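  /**
   * Check that flushes keep producing store files, past the blocking threshold, when compaction
   * is disabled on the table.
   */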
  @Test
  public void testFlushWithTableCompactionDisabled() throws Exception {
    HTableDescriptor htd = new HTableDescriptor(tableName);
    htd.setCompactionEnabled(false);
    TEST_UTIL.createTable(htd, new byte[][] { family }, null);

    // load the table
    for (int i = 0; i < blockingStoreFiles + 1; i++) {
      TEST_UTIL.loadTable(TEST_UTIL.getConnection().getTable(tableName), family);
      TEST_UTIL.flush(tableName);
    }

    // make sure the store file count is greater than blockingStoreFiles + 1
    Path tableDir = CommonFSUtils.getTableDir(rootDir, tableName);
    Collection<String> hfiles = SnapshotTestingUtils.listHFileNames(fs, tableDir);
    Assert.assertTrue("expected more than " + (blockingStoreFiles + 1) + " store files, got "
        + hfiles.size(), hfiles.size() > blockingStoreFiles + 1);
  }
}