/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.quotas;

import static org.junit.Assert.assertTrue;

import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import java.util.Map.Entry;
import java.util.Random;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseClassTestRule;
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.MiniHBaseCluster;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.RegionInfo;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.master.HMaster;
import org.apache.hadoop.hbase.testclassification.MediumTests;
import org.apache.hadoop.hbase.util.Bytes;
import org.junit.After;
import org.junit.Before;
import org.junit.ClassRule;
import org.junit.Rule;
import org.junit.Test;
import org.junit.experimental.categories.Category;
import org.junit.rules.TestName;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

/**
 * Test class which verifies that region sizes are reported to the master.
 */
@Category(MediumTests.class)
public class TestRegionSizeUse {

  @ClassRule
  public static final HBaseClassTestRule CLASS_RULE =
      HBaseClassTestRule.forClass(TestRegionSizeUse.class);

  private static final Logger LOG = LoggerFactory.getLogger(TestRegionSizeUse.class);
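  // Each cell value written by the test is SIZE_PER_VALUE random bytes; the table is
  // pre-split into NUM_SPLITS regions so the writes spread across the cluster.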
  private static final int SIZE_PER_VALUE = 256;
  private static final int NUM_SPLITS = 10;
  private static final String F1 = "f1";
  private static final HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();

  private MiniHBaseCluster cluster;

  @Rule
  public TestName testName = new TestName();

  @Before
  public void setUp() throws Exception {
    Configuration conf = TEST_UTIL.getConfiguration();
    // Increase the frequency of the quota-related chores so the test stays responsive
    SpaceQuotaHelperForTests.updateConfigForQuotas(conf);
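    // Start a mini cluster with two region servers so size reports come from more than one server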
    cluster = TEST_UTIL.startMiniCluster(2);
  }

  @After
  public void tearDown() throws Exception {
    TEST_UTIL.shutdownMiniCluster();
  }

  @Test
  public void testBasicRegionSizeReports() throws Exception {
    final long bytesWritten = 5L * 1024L * 1024L; // 5MB
    final TableName tn = writeData(bytesWritten);
    LOG.debug("Data was written to HBase");
    final Admin admin = TEST_UTIL.getAdmin();
    // Push the data to disk.
    admin.flush(tn);
    LOG.debug("Data flushed to disk");
    // Get the final region distribution
    final List<RegionInfo> regions = admin.getRegions(tn);

    HMaster master = cluster.getMaster();
    MasterQuotaManager quotaManager = master.getMasterQuotaManager();
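    // snapshotRegionSizes() returns the master's current view of the region sizes that the
    // region servers have reported so far.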
    Map<RegionInfo,Long> regionSizes = quotaManager.snapshotRegionSizes();
    // Wait until the master has received a size report for every region of our table. The table
    // may split after the data is written, so expect at least as many regions as existed right
    // after the writes finished.
    int observedRegions = numRegionsForTable(tn, regionSizes);
    while (observedRegions < regions.size()) {
      LOG.debug("Expecting more regions. Saw " + observedRegions
          + " region sizes reported, expected at least " + regions.size());
      Thread.sleep(1000);
      regionSizes = quotaManager.snapshotRegionSizes();
      observedRegions = numRegionsForTable(tn, regionSizes);
    }

    LOG.debug("Observed region sizes by the HMaster: " + regionSizes);
    long totalRegionSize = 0L;
    for (Long regionSize : regionSizes.values()) {
      totalRegionSize += regionSize;
    }
    assertTrue("Expected region size report to exceed " + bytesWritten + ", but was "
        + totalRegionSize + ". RegionSizes=" + regionSizes, bytesWritten < totalRegionSize);
  }

  /**
   * Writes at least {@code sizeInBytes} bytes of data to HBase and returns the TableName used.
   *
   * @param sizeInBytes The amount of data to write in bytes.
   * @return The table the data was written to
   */
  private TableName writeData(long sizeInBytes) throws IOException {
    final Connection conn = TEST_UTIL.getConnection();
    final Admin admin = TEST_UTIL.getAdmin();
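    // Name the table after the running test method so each test method uses its own table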
    final TableName tn = TableName.valueOf(testName.getMethodName());

    // Delete the old table
    if (admin.tableExists(tn)) {
      admin.disableTable(tn);
      admin.deleteTable(tn);
    }

    // Create the table
    TableDescriptorBuilder tableDescriptorBuilder =
      TableDescriptorBuilder.newBuilder(tn);
    ColumnFamilyDescriptor columnFamilyDescriptor =
      ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes(F1)).build();
    tableDescriptorBuilder.setColumnFamily(columnFamilyDescriptor);

    admin.createTable(tableDescriptorBuilder.build(), Bytes.toBytes("1"),
      Bytes.toBytes("9"), NUM_SPLITS);

    try (Table table = conn.getTable(tn)) {
      List<Put> updates = new ArrayList<>();
      long bytesToWrite = sizeInBytes;
      long rowKeyId = 0L;
      final StringBuilder sb = new StringBuilder();
      final Random r = new Random();
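      // Keep generating Puts until at least sizeInBytes worth of values has been written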
      while (bytesToWrite > 0L) {
        sb.setLength(0);
        sb.append(Long.toString(rowKeyId));
        // Use the reversed counter as the row key so the writes spread evenly across all regions
        Put p = new Put(Bytes.toBytes(sb.reverse().toString()));
        byte[] value = new byte[SIZE_PER_VALUE];
        r.nextBytes(value);
        p.addColumn(Bytes.toBytes(F1), Bytes.toBytes("q1"), value);
        updates.add(p);

        // Write out the batch once it holds more than 50 Puts
        if (updates.size() > 50) {
          table.put(updates);
          updates.clear();
        }

        // Only count the value bytes; ignore the size of the row key and column
        bytesToWrite -= SIZE_PER_VALUE;
        rowKeyId++;
      }

      // Write the final batch
      if (!updates.isEmpty()) {
        table.put(updates);
      }

      return tn;
    }
  }

  /**
   * Computes the number of regions for the given table that have a positive reported size.
   *
   * @param tn The TableName in question
   * @param regions A map of region to reported size
   * @return The number of regions for the given table with a positive reported size.
   */
  private int numRegionsForTable(TableName tn, Map<RegionInfo,Long> regions) {
    int sum = 0;
    for (Entry<RegionInfo,Long> entry : regions.entrySet()) {
      if (tn.equals(entry.getKey().getTable()) && 0 < entry.getValue()) {
        sum++;
      }
    }
    return sum;
  }
}