/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.regionserver;

import java.io.IOException;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseClassTestRule;
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.RowTooBigException;
import org.apache.hadoop.hbase.testclassification.MediumTests;
import org.apache.hadoop.hbase.testclassification.RegionServerTests;
import org.apache.hadoop.hbase.util.Bytes;
import org.junit.AfterClass;
import org.junit.BeforeClass;
import org.junit.ClassRule;
import org.junit.Test;
import org.junit.experimental.categories.Category;

/**
 * Test case to check that the region server throws
 * {@link org.apache.hadoop.hbase.client.RowTooBigException} when the size of a row exceeds the
 * configured maximum.
 */
@Category({RegionServerTests.class, MediumTests.class})
public class TestRowTooBig {

  @ClassRule
  public static final HBaseClassTestRule CLASS_RULE =
      HBaseClassTestRule.forClass(TestRowTooBig.class);

  private final static HBaseTestingUtility HTU = HBaseTestingUtility.createLocalHTU();
  private static Path rootRegionDir;
  private static final HTableDescriptor TEST_HTD =
    new HTableDescriptor(TableName.valueOf(TestRowTooBig.class.getSimpleName()));

  @BeforeClass
  public static void before() throws Exception {
    // Cap the row size at 10 MB (default: 1 GB) before the cluster starts, so the
    // Gets below can trip the limit with a modest amount of data.
    HTU.getConfiguration().setLong(HConstants.TABLE_MAX_ROWSIZE_KEY,
      10 * 1024 * 1024L);
    HTU.startMiniCluster();
    rootRegionDir = HTU.getDataTestDirOnTestFS("TestRowTooBig");
  }

  @AfterClass
  public static void after() throws Exception {
    HTU.shutdownMiniCluster();
  }

  /**
   * Usecase:
   *  - create a row with 5 large cells (5 MB each)
   *  - flush memstore, but don't compact storefiles
   *  - try to Get the whole row
   *
   * The OOME happened before we actually got to reading results: it occurred during seeking,
   * since each StoreFile gets its own scanner and each scanner seeks to its first KV.
   */
  @Test(expected = RowTooBigException.class)
  public void testScannersSeekOnFewLargeCells() throws IOException {
    byte[] row1 = Bytes.toBytes("row1");
    byte[] fam1 = Bytes.toBytes("fam1");

    HTableDescriptor htd = TEST_HTD;
    HColumnDescriptor hcd = new HColumnDescriptor(fam1);
    if (htd.hasFamily(hcd.getName())) {
      htd.modifyFamily(hcd);
    } else {
      htd.addFamily(hcd);
    }

    final HRegionInfo hri =
      new HRegionInfo(htd.getTableName(), HConstants.EMPTY_START_ROW,
        HConstants.EMPTY_END_ROW);
    HRegion region =
        HBaseTestingUtility.createRegionAndWAL(hri, rootRegionDir, HTU.getConfiguration(), htd);
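    // createRegionAndWAL hands back a standalone HRegion (with its own WAL) on the test
    // filesystem; the puts below write to it directly, bypassing the mini cluster's RPC path.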
    try {
      // Write 5 cells of 5 MB each, flushing after every put so each cell
      // ends up in its own store file.
      for (int i = 0; i < 5; i++) {
        Put put = new Put(row1);

        byte[] value = new byte[5 * 1024 * 1024];
        put.addColumn(fam1, Bytes.toBytes("col_" + i), value);
        region.put(put);
        region.flush(true);
      }
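      // The Get below opens one StoreFileScanner per store file, and each scanner seeks to
      // its first (5 MB) cell. The 25 MB of accumulated cells blows through the 10 MB
      // row-size cap, so RowTooBigException fires before the row can be returned.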

      Get get = new Get(row1);
      region.get(get);
    } finally {
      HBaseTestingUtility.closeRegionAndWAL(region);
    }
  }

  /**
   * Usecase:
   *  - create a row with 1M small cells (10 bytes each)
   *  - flush and run major compaction
   *  - try to Get the whole row
   *
   * The OOME happened in StoreScanner.next(..).
   */
  @Test(expected = RowTooBigException.class)
  public void testScanAcrossManySmallColumns() throws IOException {
    byte[] row1 = Bytes.toBytes("row1");
    byte[] fam1 = Bytes.toBytes("fam1");

    HTableDescriptor htd = TEST_HTD;
    HColumnDescriptor hcd = new HColumnDescriptor(fam1);
    if (htd.hasFamily(hcd.getName())) {
      htd.modifyFamily(hcd);
    } else {
      htd.addFamily(hcd);
    }

    final HRegionInfo hri =
      new HRegionInfo(htd.getTableName(), HConstants.EMPTY_START_ROW,
        HConstants.EMPTY_END_ROW);
    HRegion region =
        HBaseTestingUtility.createRegionAndWAL(hri, rootRegionDir, HTU.getConfiguration(), htd);
    try {
      // Write the row as 10 puts of 100,000 10-byte columns each, flushing
      // after every put so the cells spread across 10 store files.
      for (int i = 0; i < 10; i++) {
        Put put = new Put(row1);
        for (int j = 0; j < 10 * 10000; j++) {
          byte[] value = new byte[10];
          put.addColumn(fam1, Bytes.toBytes("col_" + i + "_" + j), value);
        }
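        // Each put carries ~1 MB of values plus per-cell key overhead (row, family,
        // qualifier, timestamp), so the 1M-cell row comfortably exceeds the 10 MB limit.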
        region.put(put);
        region.flush(true);
      }
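      // Major-compact the 10 store files into one; with a single file there is no multi-file
      // seek, so the row-size check trips later, while StoreScanner.next(..) walks the row.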
      region.compact(true);

      Get get = new Get(row1);
      region.get(get);
    } finally {
      HBaseTestingUtility.closeRegionAndWAL(region);
    }
  }
}