/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.regionserver;

import java.io.IOException;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseClassTestRule;
import org.apache.hadoop.hbase.HBaseTestingUtil;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.RegionInfo;
import org.apache.hadoop.hbase.client.RegionInfoBuilder;
import org.apache.hadoop.hbase.client.RowTooBigException;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.testclassification.MediumTests;
import org.apache.hadoop.hbase.testclassification.RegionServerTests;
import org.apache.hadoop.hbase.util.Bytes;
import org.junit.AfterClass;
import org.junit.BeforeClass;
import org.junit.ClassRule;
import org.junit.Test;
import org.junit.experimental.categories.Category;

/**
 * Test case to check that the region server throws
 * {@link org.apache.hadoop.hbase.client.RowTooBigException} when the size of a single row exceeds
 * the configured limit.
 */
@Category({ RegionServerTests.class, MediumTests.class })
public class TestRowTooBig {

  @ClassRule
  public static final HBaseClassTestRule CLASS_RULE =
    HBaseClassTestRule.forClass(TestRowTooBig.class);

  private final static HBaseTestingUtil HTU = new HBaseTestingUtil();
  private static Path rootRegionDir;
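  // Base table descriptor shared by both tests; each test adds its own column family to a copy
  // of this descriptor before creating the region.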
  private static final TableDescriptor TEST_TD = TableDescriptorBuilder
    .newBuilder(TableName.valueOf(TestRowTooBig.class.getSimpleName())).build();

  @BeforeClass
  public static void before() throws Exception {
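    // Cap the maximum row size at 10 MB (hbase.table.max.rowsize) before the mini cluster starts;
    // both tests below deliberately build a single row that exceeds this limit.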
    HTU.getConfiguration().setLong(HConstants.TABLE_MAX_ROWSIZE_KEY, 10 * 1024 * 1024L);
    HTU.startMiniCluster();
    rootRegionDir = HTU.getDataTestDirOnTestFS("TestRowTooBig");
  }

  @AfterClass
  public static void after() throws Exception {
    HTU.shutdownMiniCluster();
  }

  /**
   * Use case:
   * <ul>
   * <li>create a row with 5 large cells (5 MB each)</li>
   * <li>flush the memstore, but don't compact the storefiles</li>
   * <li>try to Get the whole row</li>
   * </ul>
   * An OOME happened before we actually got to reading results, but during seeking, as each
   * StoreFile gets its own scanner and each scanner seeks after the first KV.
   */
  @Test(expected = RowTooBigException.class)
  public void testScannersSeekOnFewLargeCells() throws IOException {
    byte[] row1 = Bytes.toBytes("row1");
    byte[] fam1 = Bytes.toBytes("fam1");

    TableDescriptor tableDescriptor = TableDescriptorBuilder.newBuilder(TEST_TD)
      .setColumnFamily(ColumnFamilyDescriptorBuilder.of(fam1)).build();

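    // Create the region directly on the test filesystem with its own WAL, bypassing the mini
    // cluster, so the Get below exercises the region read path in-process.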
    final RegionInfo hri = RegionInfoBuilder.newBuilder(tableDescriptor.getTableName()).build();
    HRegion region = HBaseTestingUtil.createRegionAndWAL(hri, rootRegionDir, HTU.getConfiguration(),
      tableDescriptor);
    try {
      // Add 5 cells to memstore
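      // 5 cells x 5 MB = 25 MB in one row, well past the 10 MB limit. Flushing after every put
      // leaves each cell in its own store file, so the row is spread across 5 StoreFiles.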
      for (int i = 0; i < 5; i++) {
        Put put = new Put(row1);

        byte[] value = new byte[5 * 1024 * 1024];
        put.addColumn(fam1, Bytes.toBytes("col_" + i), value);
        region.put(put);
        region.flush(true);
      }

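      // Fetching the whole 25 MB row exceeds the configured 10 MB limit, so this Get is expected
      // to fail with RowTooBigException (per the Javadoc above, while the per-StoreFile scanners
      // are seeked rather than while results are read).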
      Get get = new Get(row1);
      region.get(get);
    } finally {
      HBaseTestingUtil.closeRegionAndWAL(region);
    }
  }

  /**
   * Use case:
   * <ul>
   * <li>create a row with 1M cells, 10 bytes each</li>
   * <li>flush and run a major compaction</li>
   * <li>try to Get the whole row</li>
   * </ul>
   * An OOME happened in StoreScanner.next(..).
   */
  @Test(expected = RowTooBigException.class)
  public void testScanAcrossManySmallColumns() throws IOException {
    byte[] row1 = Bytes.toBytes("row1");
    byte[] fam1 = Bytes.toBytes("fam1");

    TableDescriptor tableDescriptor = TableDescriptorBuilder.newBuilder(TEST_TD)
      .setColumnFamily(ColumnFamilyDescriptorBuilder.of(fam1)).build();

    final RegionInfo hri = RegionInfoBuilder.newBuilder(tableDescriptor.getTableName()).build();
    HRegion region = HBaseTestingUtil.createRegionAndWAL(hri, rootRegionDir, HTU.getConfiguration(),
      tableDescriptor);
    try {
      // Add to memstore
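      // 10 puts x 100,000 columns x 10-byte values = 1M cells in one row: roughly 10 MB of values
      // alone, and well past the 10 MB limit once per-cell key overhead is included.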
      for (int i = 0; i < 10; i++) {
        Put put = new Put(row1);
        for (int j = 0; j < 10 * 10000; j++) {
          byte[] value = new byte[10];
          put.addColumn(fam1, Bytes.toBytes("col_" + i + "_" + j), value);
        }
        region.put(put);
        region.flush(true);
      }
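      // Major-compact the ten flushed files into a single store file, matching the use case in
      // the Javadoc above.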
      region.compact(true);

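      // Reading back the whole row is expected to fail with RowTooBigException in the scan path
      // (historically this produced an OOME in StoreScanner.next, per the Javadoc above).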
      Get get = new Get(row1);
      region.get(get);
    } finally {
      HBaseTestingUtil.closeRegionAndWAL(region);
    }
  }
}