/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.regionserver.throttle;

import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;

import java.io.IOException;
import java.util.List;
import java.util.concurrent.TimeUnit;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseClassTestRule;
import org.apache.hadoop.hbase.HBaseTestingUtil;
import org.apache.hadoop.hbase.SingleProcessHBaseCluster;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.regionserver.DefaultStoreEngine;
import org.apache.hadoop.hbase.regionserver.HRegion;
import org.apache.hadoop.hbase.regionserver.HRegionServer;
import org.apache.hadoop.hbase.regionserver.HStore;
import org.apache.hadoop.hbase.regionserver.Region;
import org.apache.hadoop.hbase.regionserver.StoreEngine;
import org.apache.hadoop.hbase.regionserver.StripeStoreEngine;
import org.apache.hadoop.hbase.testclassification.LargeTests;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.JVMClusterUtil;
import org.apache.hadoop.hbase.util.Pair;
import org.junit.After;
import org.junit.Before;
import org.junit.ClassRule;
import org.junit.Rule;
import org.junit.Test;
import org.junit.experimental.categories.Category;
import org.junit.rules.TestName;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

@Category(LargeTests.class)
public class TestFlushWithThroughputController {

  @ClassRule
  public static final HBaseClassTestRule CLASS_RULE =
    HBaseClassTestRule.forClass(TestFlushWithThroughputController.class);

  private static final Logger LOG =
    LoggerFactory.getLogger(TestFlushWithThroughputController.class);
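  /** Tolerance used when comparing the expected and observed throughput as doubles. */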
  private static final double EPSILON = 1.3E-6;

  private HBaseTestingUtil hbtu;
  @Rule
  public TestName testName = new TestName();
  private TableName tableName;
  private final byte[] family = Bytes.toBytes("f");
  private final byte[] qualifier = Bytes.toBytes("q");

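  /**
   * Configures the pressure-aware flush throughput controller; each test starts the mini cluster
   * itself.
   */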
  @Before
  public void setUp() {
    hbtu = new HBaseTestingUtil();
    tableName = TableName.valueOf("Table-" + testName.getMethodName());
    hbtu.getConfiguration().set(
      FlushThroughputControllerFactory.HBASE_FLUSH_THROUGHPUT_CONTROLLER_KEY,
      PressureAwareFlushThroughputController.class.getName());
  }

  @After
  public void tearDown() throws Exception {
    hbtu.shutdownMiniCluster();
  }

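  /**
   * Returns the first store of the given table found on any region server in the mini cluster, or
   * null if no region of the table is online.
   */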
  private HStore getStoreWithName(TableName tableName) {
    SingleProcessHBaseCluster cluster = hbtu.getMiniHBaseCluster();
    List<JVMClusterUtil.RegionServerThread> rsts = cluster.getRegionServerThreads();
    for (JVMClusterUtil.RegionServerThread rst : rsts) {
      HRegionServer hrs = rst.getRegionServer();
      for (Region region : hrs.getRegions(tableName)) {
        return ((HRegion) region).getStores().iterator().next();
      }
    }
    return null;
  }

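  /**
   * Sets the upper and lower bounds (in bytes per second) between which the controller tunes the
   * maximum flush throughput.
   */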
  private void setMaxMinThroughputs(long max, long min) {
    Configuration conf = hbtu.getConfiguration();
    conf.setLong(
      PressureAwareFlushThroughputController.HBASE_HSTORE_FLUSH_MAX_THROUGHPUT_LOWER_BOUND, min);
    conf.setLong(
      PressureAwareFlushThroughputController.HBASE_HSTORE_FLUSH_MAX_THROUGHPUT_UPPER_BOUND, max);
  }

  /**
   * Writes puts to the table and flushes it a few times.
   * @return {@link Pair} of (throughput in bytes per second, duration in nanoseconds).
   */
  private Pair<Double, Long> generateAndFlushData(Table table) throws IOException {
    // Internally, throughput is controlled after every cell write, so keep the value size small
    // for finer-grained control.
    final int NUM_FLUSHES = 3, NUM_PUTS = 50, VALUE_SIZE = 200 * 1024;
    long duration = 0;
    for (int i = 0; i < NUM_FLUSHES; i++) {
      // Write about 10MB (10 times the throughput limit) per iteration.
      for (int j = 0; j < NUM_PUTS; j++) {
        byte[] value = new byte[VALUE_SIZE];
        Bytes.random(value);
        table.put(new Put(Bytes.toBytes(i * NUM_PUTS + j)).addColumn(family, qualifier, value));
      }
      long startTime = System.nanoTime();
      hbtu.getAdmin().flush(tableName);
      duration += System.nanoTime() - startTime;
    }
    HStore store = getStoreWithName(tableName);
    assertEquals(NUM_FLUSHES, store.getStorefilesCount());
    double throughput =
      (double) store.getStorefilesSize() / TimeUnit.NANOSECONDS.toSeconds(duration);
    return new Pair<>(throughput, duration);
  }

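  /**
   * Starts a single-node cluster with both throughput bounds pinned at 1 MB/s, runs the flush
   * workload, and asserts that the observed flush throughput stays within 20% of the limit.
   * @return the total time spent in flushes, in nanoseconds
   */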
  private long testFlushWithThroughputLimit() throws Exception {
    final long throughputLimit = 1024 * 1024;
    setMaxMinThroughputs(throughputLimit, throughputLimit);
    Configuration conf = hbtu.getConfiguration();
    // The control check interval is a byte count, so re-check the limit after every 1MB written.
    conf.setLong(
      PressureAwareFlushThroughputController.HBASE_HSTORE_FLUSH_THROUGHPUT_CONTROL_CHECK_INTERVAL,
      throughputLimit);
    hbtu.startMiniCluster(1);
    Table table = hbtu.createTable(tableName, family);
    Pair<Double, Long> result = generateAndFlushData(table);
    hbtu.deleteTable(tableName);
    LOG.debug("Throughput is: " + (result.getFirst() / 1024 / 1024) + " MB/s");
    // Confirm that the speed limit works properly (not too fast, and also not too slow).
    // 20% is the max acceptable error rate.
    assertTrue(result.getFirst() < throughputLimit * 1.2);
    assertTrue(result.getFirst() > throughputLimit * 0.8);
    return result.getSecond();
  }

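  /**
   * Test flush throughput control with the default store engine.
   */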
  @Test
  public void testFlushControl() throws Exception {
    testFlushWithThroughputLimit();
  }

  /**
   * Test the tuning task of {@link PressureAwareFlushThroughputController}.
   */
  @Test
  public void testFlushThroughputTuning() throws Exception {
    Configuration conf = hbtu.getConfiguration();
    setMaxMinThroughputs(20L * 1024 * 1024, 10L * 1024 * 1024);
    conf.set(StoreEngine.STORE_ENGINE_CLASS_KEY, DefaultStoreEngine.class.getName());
    conf.setInt(PressureAwareFlushThroughputController.HBASE_HSTORE_FLUSH_THROUGHPUT_TUNE_PERIOD,
      3000);
    hbtu.startMiniCluster(1);
    Connection conn = ConnectionFactory.createConnection(conf);
    // Create the table with compactions disabled so they cannot interfere with the test.
    hbtu.getAdmin()
      .createTable(TableDescriptorBuilder.newBuilder(tableName)
        .setColumnFamily(ColumnFamilyDescriptorBuilder.of(family)).setCompactionEnabled(false)
        .build());
    hbtu.waitTableAvailable(tableName);
    HRegionServer regionServer = hbtu.getRSForFirstRegionInTable(tableName);
    double pressure = regionServer.getFlushPressure();
    LOG.debug("Flush pressure before flushing: " + pressure);
    PressureAwareFlushThroughputController throughputController =
      (PressureAwareFlushThroughputController) regionServer.getFlushThroughputController();
    for (HRegion region : regionServer.getRegions()) {
      region.flush(true);
    }
    // We used to assert that the flush pressure is zero, but after HBASE-15787 or HBASE-18294 we
    // use heapSize instead of dataSize to calculate the flush pressure, and since heapSize is
    // never zero, the flush pressure is never zero either. So we only assert that flushing
    // lowered it.
    assertTrue(regionServer.getFlushPressure() < pressure);
    // Sleep past the 3s tune period so the tuning task runs at least once.
    Thread.sleep(5000);
    Table table = conn.getTable(tableName);
    for (int i = 0; i < 10; i++) {
      for (int j = 0; j < 10; j++) {
        byte[] value = new byte[256 * 1024];
        Bytes.random(value);
        table.put(new Put(Bytes.toBytes(i * 10 + j)).addColumn(family, qualifier, value));
      }
    }
    // Let the tuner run again, now under the flush pressure created by the writes above.
    Thread.sleep(5000);
    // With bounds of 10 and 20 MB/s, the tuned max throughput is
    // lower + (upper - lower) * pressure = 10 MB/s * (1 + pressure).
    double expectedThroughput = 10L * 1024 * 1024 * (1 + regionServer.getFlushPressure());
    assertEquals(expectedThroughput, throughputController.getMaxThroughput(), EPSILON);

    // Switching to NoLimitThroughputController at runtime should stop the old controller.
    conf.set(FlushThroughputControllerFactory.HBASE_FLUSH_THROUGHPUT_CONTROLLER_KEY,
      NoLimitThroughputController.class.getName());
    regionServer.onConfigurationChange(conf);
    assertTrue(throughputController.isStopped());
    assertTrue(regionServer.getFlushThroughputController() instanceof NoLimitThroughputController);
    table.close();
    conn.close();
  }

  /**
   * Test flush throughput control when the striped store engine is in use.
   */
  @Test
  public void testFlushControlForStripedStore() throws Exception {
    hbtu.getConfiguration().set(StoreEngine.STORE_ENGINE_CLASS_KEY,
      StripeStoreEngine.class.getName());
    testFlushWithThroughputLimit();
  }
}