/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.client;

import static org.junit.Assert.assertEquals;

import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.CellBuilderFactory;
import org.apache.hadoop.hbase.CellBuilderType;
import org.apache.hadoop.hbase.CompareOperator;
import org.apache.hadoop.hbase.CompatibilityFactory;
import org.apache.hadoop.hbase.HBaseClassTestRule;
import org.apache.hadoop.hbase.HBaseTestingUtil;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.Waiter;
import org.apache.hadoop.hbase.filter.BinaryComparator;
import org.apache.hadoop.hbase.filter.QualifierFilter;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
import org.apache.hadoop.hbase.ipc.RpcServerInterface;
import org.apache.hadoop.hbase.logging.Log4jUtils;
import org.apache.hadoop.hbase.metrics.BaseSource;
import org.apache.hadoop.hbase.regionserver.HRegion;
import org.apache.hadoop.hbase.regionserver.HRegionServer;
import org.apache.hadoop.hbase.test.MetricsAssertHelper;
import org.apache.hadoop.hbase.testclassification.ClientTests;
import org.apache.hadoop.hbase.testclassification.MediumTests;
import org.apache.hadoop.hbase.util.Bytes;
import org.junit.AfterClass;
import org.junit.BeforeClass;
import org.junit.ClassRule;
import org.junit.Rule;
import org.junit.Test;
import org.junit.experimental.categories.Category;
import org.junit.rules.TestName;

/**
 * This test sets the multi-response size limit WAAAAAY low and then checks to make sure that gets
 * will still make progress.
 */
@Category({ MediumTests.class, ClientTests.class })
public class TestMultiRespectsLimits {

  @ClassRule
  public static final HBaseClassTestRule CLASS_RULE =
    HBaseClassTestRule.forClass(TestMultiRespectsLimits.class);

  private final static HBaseTestingUtil TEST_UTIL = new HBaseTestingUtil();
  private static final MetricsAssertHelper METRICS_ASSERT =
    CompatibilityFactory.getInstance(MetricsAssertHelper.class);
  private final static byte[] FAMILY = Bytes.toBytes("D");
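  // MAX_SIZE serves double duty: it is the server-side max result size in bytes (set in
  // setUpBeforeClass) and the number of gets issued in testMultiLimits.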
  public static final int MAX_SIZE = 90;
  private static String LOG_LEVEL;

  @Rule
  public TestName name = new TestName();

  @BeforeClass
  public static void setUpBeforeClass() throws Exception {
    // disable the debug log to avoid flooding the output
    LOG_LEVEL = Log4jUtils.getEffectiveLevel(AsyncRegionLocatorHelper.class.getName());
    Log4jUtils.setLogLevel(AsyncRegionLocatorHelper.class.getName(), "INFO");
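    // Shrink the server's max result size to MAX_SIZE bytes so multi responses trip the limit.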
    TEST_UTIL.getConfiguration().setLong(HConstants.HBASE_SERVER_SCANNER_MAX_RESULT_SIZE_KEY,
      MAX_SIZE);

    // Only start one regionserver so that all regions are on the same server.
    TEST_UTIL.startMiniCluster(1);
  }

  @AfterClass
  public static void tearDownAfterClass() throws Exception {
    if (LOG_LEVEL != null) {
      Log4jUtils.setLogLevel(AsyncRegionLocatorHelper.class.getName(), LOG_LEVEL);
    }
    TEST_UTIL.shutdownMiniCluster();
  }

  @Test
  public void testMultiLimits() throws Exception {
    final TableName tableName = TableName.valueOf(name.getMethodName());
    Table t = TEST_UTIL.createTable(tableName, FAMILY);
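    // Populate the table with HBaseTestingUtil's standard row set so the gets below have data.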
    TEST_UTIL.loadTable(t, FAMILY, false);

    // Split the table to make sure that the chunking happens across regions.
    try (final Admin admin = TEST_UTIL.getAdmin()) {
      admin.split(tableName);
      TEST_UTIL.waitFor(60000, new Waiter.Predicate<Exception>() {
        @Override
        public boolean evaluate() throws Exception {
          return admin.getRegions(tableName).size() > 1;
        }
      });
    }
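    // Build one multi batch of MAX_SIZE gets against the pre-loaded rows.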
    List<Get> gets = new ArrayList<>(MAX_SIZE);

    for (int i = 0; i < MAX_SIZE; i++) {
      gets.add(new Get(HBaseTestingUtil.ROWS[i]));
    }

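    // Snapshot the exception counters so the assertions below measure only this test's delta.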
    RpcServerInterface rpcServer = TEST_UTIL.getHBaseCluster().getRegionServer(0).getRpcServer();
    BaseSource s = rpcServer.getMetrics().getMetricsSource();
    long startingExceptions = METRICS_ASSERT.getCounter("exceptions", s);
    long startingMultiExceptions = METRICS_ASSERT.getCounter("exceptions.multiResponseTooLarge", s);

    Result[] results = t.get(gets);
    assertEquals(MAX_SIZE, results.length);

    // Cells from TEST_UTIL.loadTable have a length of 27.
    // Multiplying by less than that gives an easy lower bound on size.
    // However, in reality each KV is reported as much larger than that.
    METRICS_ASSERT.assertCounterGt("exceptions", startingExceptions + ((MAX_SIZE * 25) / MAX_SIZE),
      s);
    METRICS_ASSERT.assertCounterGt("exceptions.multiResponseTooLarge",
      startingMultiExceptions + ((MAX_SIZE * 25) / MAX_SIZE), s);
  }

  @Test
  public void testBlockMultiLimits() throws Exception {
    final TableName tableName = TableName.valueOf(name.getMethodName());
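    // Use FAST_DIFF block encoding; the size accounting exercised below is driven by the data
    // blocks backing each returned cell.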
    TEST_UTIL.getAdmin().createTable(
      TableDescriptorBuilder.newBuilder(tableName).setColumnFamily(ColumnFamilyDescriptorBuilder
        .newBuilder(FAMILY).setDataBlockEncoding(DataBlockEncoding.FAST_DIFF).build()).build());
    Table t = TEST_UTIL.getConnection().getTable(tableName);

    final HRegionServer regionServer = TEST_UTIL.getHBaseCluster().getRegionServer(0);
    RpcServerInterface rpcServer = regionServer.getRpcServer();
    BaseSource s = rpcServer.getMetrics().getMetricsSource();
    long startingExceptions = METRICS_ASSERT.getCounter("exceptions", s);
    long startingMultiExceptions = METRICS_ASSERT.getCounter("exceptions.multiResponseTooLarge", s);

    byte[] row = Bytes.toBytes("TEST");
    byte[][] cols = new byte[][] { Bytes.toBytes("0"), // Get this
      Bytes.toBytes("1"), // Buffer
      Bytes.toBytes("2"), // Buffer
      Bytes.toBytes("3"), // Get this
      Bytes.toBytes("4"), // Buffer
      Bytes.toBytes("5"), // Buffer
      Bytes.toBytes("6"), // Buffer
      Bytes.toBytes("7"), // Get this
      Bytes.toBytes("8"), // Buffer
      Bytes.toBytes("9"), // Buffer
    };

    // Set the value size so that one result will be less than MAX_SIZE,
    // but the block being referenced will be larger than MAX_SIZE.
    // This should cause the regionserver to try to send a result immediately.
    byte[] value = new byte[1];
    Bytes.random(value);

    for (int i = 0; i < cols.length; i++) {
      if (i == 6) {
        // do a flush here so we end up with 2 blocks, 55 and 45 bytes
        flush(regionServer, tableName);
      }
      byte[] col = cols[i];
      Put p = new Put(row);
      p.add(CellBuilderFactory.create(CellBuilderType.SHALLOW_COPY).setRow(row).setFamily(FAMILY)
        .setQualifier(col).setTimestamp(p.getTimestamp()).setType(Cell.Type.Put).setValue(value)
        .build());
      t.put(p);
    }

    // Make sure that a flush happens
    flush(regionServer, tableName);

    List<Get> gets = new ArrayList<>(4);
    // This get returns nothing since the filter doesn't match. Filtered cells still retain
    // blocks, and this is a full row scan of both blocks. This equals 100 bytes so we should
    // throw a multiResponseTooLarge after this get if we are counting filtered cells correctly.
    Get g0 = new Get(row).addFamily(FAMILY).setFilter(
      new QualifierFilter(CompareOperator.EQUAL, new BinaryComparator(Bytes.toBytes("sdf"))));
    gets.add(g0);

    // g1 and g2 each count the first 55 byte block, so we end up with a block size of 110
    // after g2 and throw a multiResponseTooLarge before g3
    Get g1 = new Get(row);
    g1.addColumn(FAMILY, cols[0]);
    gets.add(g1);

    Get g2 = new Get(row);
    g2.addColumn(FAMILY, cols[3]);
    gets.add(g2);

    Get g3 = new Get(row);
    g3.addColumn(FAMILY, cols[7]);
    gets.add(g3);

    Result[] results = t.get(gets);
    assertEquals(4, results.length);
    // Expect 2 exceptions (thus 3 RPCs) -- one for g0, then another for g1 + g2, final RPC for g3.
    // If we tracked lastBlock we could squeeze g3 into the second RPC because g2 would be "free"
    // since it's in the same block as g1.
    METRICS_ASSERT.assertCounterGt("exceptions", startingExceptions + 1, s);
    METRICS_ASSERT.assertCounterGt("exceptions.multiResponseTooLarge", startingMultiExceptions + 1,
      s);
  }

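  /**
   * Flushes every region of the given table on the given region server.
   */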
  private void flush(HRegionServer regionServer, TableName tableName) throws IOException {
    for (HRegion region : regionServer.getRegions(tableName)) {
      region.flush(true);
    }
  }
}