/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.client;

import static junit.framework.TestCase.assertEquals;

import java.util.ArrayList;
import java.util.List;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.CellBuilderFactory;
import org.apache.hadoop.hbase.CellBuilderType;
import org.apache.hadoop.hbase.CompatibilityFactory;
import org.apache.hadoop.hbase.HBaseClassTestRule;
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.Waiter;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
import org.apache.hadoop.hbase.ipc.RpcServerInterface;
import org.apache.hadoop.hbase.logging.Log4jUtils;
import org.apache.hadoop.hbase.metrics.BaseSource;
import org.apache.hadoop.hbase.regionserver.HRegionServer;
import org.apache.hadoop.hbase.test.MetricsAssertHelper;
import org.apache.hadoop.hbase.testclassification.ClientTests;
import org.apache.hadoop.hbase.testclassification.MediumTests;
import org.apache.hadoop.hbase.util.Bytes;
import org.junit.AfterClass;
import org.junit.BeforeClass;
import org.junit.ClassRule;
import org.junit.Rule;
import org.junit.Test;
import org.junit.experimental.categories.Category;
import org.junit.rules.TestName;

/**
 * This test sets the multi request response size limit very low and then checks to make sure that
 * gets will still make progress.
 */
@Category({ MediumTests.class, ClientTests.class })
public class TestMultiRespectsLimits {

  @ClassRule
  public static final HBaseClassTestRule CLASS_RULE =
    HBaseClassTestRule.forClass(TestMultiRespectsLimits.class);

  private final static HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
  private static final MetricsAssertHelper METRICS_ASSERT =
    CompatibilityFactory.getInstance(MetricsAssertHelper.class);
  private final static byte[] FAMILY = Bytes.toBytes("D");
  public static final int MAX_SIZE = 100;
  private static String LOG_LEVEL;

  @Rule
  public TestName name = new TestName();

  @BeforeClass
  public static void setUpBeforeClass() throws Exception {
    // disable the debug log to avoid flooding the output
    LOG_LEVEL = Log4jUtils.getEffectiveLevel(AsyncRegionLocatorHelper.class.getName());
    Log4jUtils.setLogLevel(AsyncRegionLocatorHelper.class.getName(), "INFO");
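    // Cap the server-side max result size at MAX_SIZE (100) bytes so that almost any multi
    // response overflows the limit and has to be chunked.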
    TEST_UTIL.getConfiguration().setLong(HConstants.HBASE_SERVER_SCANNER_MAX_RESULT_SIZE_KEY,
      MAX_SIZE);

    // Only start one regionserver so that all regions are on the same server.
    TEST_UTIL.startMiniCluster(1);
  }

  @AfterClass
  public static void tearDownAfterClass() throws Exception {
    if (LOG_LEVEL != null) {
      Log4jUtils.setLogLevel(AsyncRegionLocatorHelper.class.getName(), LOG_LEVEL);
    }
    TEST_UTIL.shutdownMiniCluster();
  }

  @Test
  public void testMultiLimits() throws Exception {
    final TableName tableName = TableName.valueOf(name.getMethodName());
    Table t = TEST_UTIL.createTable(tableName, FAMILY);
    TEST_UTIL.loadTable(t, FAMILY, false);

    // Split the table to make sure that the chunking happens across regions.
    try (final Admin admin = TEST_UTIL.getAdmin()) {
      admin.split(tableName);
      TEST_UTIL.waitFor(60000, new Waiter.Predicate<Exception>() {
        @Override
        public boolean evaluate() throws Exception {
          return admin.getTableRegions(tableName).size() > 1;
        }
      });
    }
    List<Get> gets = new ArrayList<>(MAX_SIZE);

    for (int i = 0; i < MAX_SIZE; i++) {
      gets.add(new Get(HBaseTestingUtility.ROWS[i]));
    }

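    // Record the exception counters before the multi call so that the assertions below only
    // measure the exceptions raised by this request.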
    RpcServerInterface rpcServer = TEST_UTIL.getHBaseCluster().getRegionServer(0).getRpcServer();
    BaseSource s = rpcServer.getMetrics().getMetricsSource();
    long startingExceptions = METRICS_ASSERT.getCounter("exceptions", s);
    long startingMultiExceptions = METRICS_ASSERT.getCounter("exceptions.multiResponseTooLarge", s);

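    // Send all the gets as one batch. The server cannot fit them into a single response under
    // the tiny size cap, so it is expected to return partial results and have the client retry
    // the rest, bumping the multiResponseTooLarge counter each time a response is cut short.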
    Result[] results = t.get(gets);
    assertEquals(MAX_SIZE, results.length);

    // Cells from TEST_UTIL.loadTable have a length of 27 bytes. Multiplying the number of gets
    // by a per-cell size below that (25) and dividing by the response size limit gives an easy
    // lower bound on how many chunked responses are needed. In reality each cell is reported as
    // much larger than that, so the counters should climb well past this bound.
    METRICS_ASSERT.assertCounterGt("exceptions", startingExceptions + ((MAX_SIZE * 25) / MAX_SIZE),
      s);
    METRICS_ASSERT.assertCounterGt("exceptions.multiResponseTooLarge",
      startingMultiExceptions + ((MAX_SIZE * 25) / MAX_SIZE), s);
  }

  @Test
  public void testBlockMultiLimits() throws Exception {
    final TableName tableName = TableName.valueOf(name.getMethodName());
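    // The column family uses FAST_DIFF data block encoding, so the cells written below share a
    // single encoded data block on disk. The response size accounting under test is expected to
    // charge for the block a cell references, not just the cell itself.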
    HTableDescriptor desc = new HTableDescriptor(tableName);
    HColumnDescriptor hcd = new HColumnDescriptor(FAMILY);
    hcd.setDataBlockEncoding(DataBlockEncoding.FAST_DIFF);
    desc.addFamily(hcd);
    TEST_UTIL.getAdmin().createTable(desc);
    Table t = TEST_UTIL.getConnection().getTable(tableName);

    final HRegionServer regionServer = TEST_UTIL.getHBaseCluster().getRegionServer(0);
    RpcServerInterface rpcServer = regionServer.getRpcServer();
    BaseSource s = rpcServer.getMetrics().getMetricsSource();
    long startingExceptions = METRICS_ASSERT.getCounter("exceptions", s);
    long startingMultiExceptions = METRICS_ASSERT.getCounter("exceptions.multiResponseTooLarge", s);

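    // Write six small cells to a single row. Only columns 0 and 3 are fetched below; the
    // "Buffer" columns pad out the data block so that it ends up larger than MAX_SIZE.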
    byte[] row = Bytes.toBytes("TEST");
    byte[][] cols = new byte[][] { Bytes.toBytes("0"), // Get this
      Bytes.toBytes("1"), // Buffer
      Bytes.toBytes("2"), // Buffer
      Bytes.toBytes("3"), // Get This
      Bytes.toBytes("4"), // Buffer
      Bytes.toBytes("5"), // Buffer
    };

    // Set the value size so that one result will be less than MAX_SIZE
    // while the block being referenced will be larger than MAX_SIZE.
    // This should cause the regionserver to try to send a result immediately.
    byte[] value = new byte[1];
    Bytes.random(value);

    for (byte[] col : cols) {
      Put p = new Put(row);
      p.add(CellBuilderFactory.create(CellBuilderType.SHALLOW_COPY).setRow(row).setFamily(FAMILY)
        .setQualifier(col).setTimestamp(p.getTimestamp()).setType(Cell.Type.Put).setValue(value)
        .build());
      t.put(p);
    }

    // Make sure that a flush happens so the cells are served from an on-disk block rather than
    // the memstore.
    try (final Admin admin = TEST_UTIL.getAdmin()) {
      admin.flush(tableName);
      TEST_UTIL.waitFor(60000, new Waiter.Predicate<Exception>() {
        @Override
        public boolean evaluate() throws Exception {
          return regionServer.getRegions(tableName).get(0).getMaxFlushedSeqId() > 3;
        }
      });
    }

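    // Each Get below returns a single tiny cell, but both cells point into the same data block,
    // which is larger than MAX_SIZE. The server should therefore split the multi response and
    // bump the multiResponseTooLarge counter even though the returned cells are small.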
    List<Get> gets = new ArrayList<>(2);
    Get g0 = new Get(row);
    g0.addColumn(FAMILY, cols[0]);
    gets.add(g0);

    Get g2 = new Get(row);
    g2.addColumn(FAMILY, cols[3]);
    gets.add(g2);

    Result[] results = t.get(gets);
    assertEquals(2, results.length);
    METRICS_ASSERT.assertCounterGt("exceptions", startingExceptions, s);
    METRICS_ASSERT.assertCounterGt("exceptions.multiResponseTooLarge", startingMultiExceptions, s);
  }
}