/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.quotas;

import static org.junit.jupiter.api.Assertions.assertTrue;

import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import java.util.Map.Entry;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseTestingUtil;
import org.apache.hadoop.hbase.SingleProcessHBaseCluster;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.RegionInfo;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.master.HMaster;
import org.apache.hadoop.hbase.testclassification.MediumTests;
import org.apache.hadoop.hbase.util.Bytes;
import org.junit.jupiter.api.AfterEach;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Tag;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.TestInfo;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

/**
 * Test class which verifies that region sizes are reported to the master.
 */
@Tag(MediumTests.TAG)
public class TestRegionSizeUse {

  private static final Logger LOG = LoggerFactory.getLogger(TestRegionSizeUse.class);
  // Size of each random value written; rowkey/column overhead is intentionally ignored.
  private static final int SIZE_PER_VALUE = 256;
  private static final int NUM_SPLITS = 10;
  private static final String F1 = "f1";
  // Number of puts accumulated before flushing a batch to the table.
  private static final int BATCH_SIZE = 50;
  private static final HBaseTestingUtil TEST_UTIL = new HBaseTestingUtil();

  private SingleProcessHBaseCluster cluster;

  @BeforeEach
  public void setUp() throws Exception {
    Configuration conf = TEST_UTIL.getConfiguration();
    // Increase the frequency of some of the chores for responsiveness of the test
    SpaceQuotaHelperForTests.updateConfigForQuotas(conf);
    cluster = TEST_UTIL.startMiniCluster(2);
  }

  @AfterEach
  public void tearDown() throws Exception {
    TEST_UTIL.shutdownMiniCluster();
  }

  /**
   * Writes ~5MB to a fresh table, flushes it, and waits until the master's quota
   * manager has received region size reports for every region of that table. The
   * reported sizes for the table must sum to at least the number of bytes written.
   * @param testInfo Injected by JUnit; supplies the method name used as the table name.
   * @throws Exception if writing, flushing, or waiting for reports fails
   */
  @Test
  public void testBasicRegionSizeReports(TestInfo testInfo) throws Exception {
    final long bytesWritten = 5L * 1024L * 1024L; // 5MB
    final TableName tn = writeData(bytesWritten, testInfo);
    LOG.debug("Data was written to HBase");
    final Admin admin = TEST_UTIL.getAdmin();
    // Push the data to disk.
    admin.flush(tn);
    LOG.debug("Data flushed to disk");
    // Get the final region distribution
    final List<RegionInfo> regions = TEST_UTIL.getAdmin().getRegions(tn);

    HMaster master = cluster.getMaster();
    MasterQuotaManager quotaManager = master.getMasterQuotaManager();
    Map<RegionInfo, Long> regionSizes = quotaManager.snapshotRegionSizes();
    // Wait until we get all of the region reports for our table
    // The table may split, so make sure we have at least as many as expected right after we
    // finished writing the data.
    int observedRegions = numRegionsForTable(tn, regionSizes);
    while (observedRegions < regions.size()) {
      LOG.debug("Expecting more regions. Saw {} region sizes reported, expected at least {}",
        observedRegions, regions.size());
      Thread.sleep(1000);
      regionSizes = quotaManager.snapshotRegionSizes();
      observedRegions = numRegionsForTable(tn, regionSizes);
    }

    LOG.debug("Observed region sizes by the HMaster: {}", regionSizes);
    // Sum only the regions belonging to our table: the snapshot also contains regions
    // of system tables (e.g. hbase:meta, the quota table) that should not count here.
    long totalRegionSize = 0L;
    for (Entry<RegionInfo, Long> entry : regionSizes.entrySet()) {
      if (tn.equals(entry.getKey().getTable())) {
        totalRegionSize += entry.getValue();
      }
    }
    assertTrue(bytesWritten < totalRegionSize, "Expected region size report to exceed "
      + bytesWritten + ", but was " + totalRegionSize + ". RegionSizes=" + regionSizes);
  }

  /**
   * Writes at least {@code sizeInBytes} bytes of data to HBase and returns the TableName used.
   * Any pre-existing table with the same name is dropped first.
   * @param sizeInBytes The amount of data to write in bytes.
   * @param testInfo Supplies the test method name, which is used as the table name.
   * @return The table the data was written to
   * @throws IOException if table (re)creation or writing fails
   */
  private TableName writeData(long sizeInBytes, TestInfo testInfo) throws IOException {
    final Connection conn = TEST_UTIL.getConnection();
    final Admin admin = TEST_UTIL.getAdmin();
    final TableName tn = TableName.valueOf(testInfo.getTestMethod().get().getName());

    // Delete the old table
    if (admin.tableExists(tn)) {
      admin.disableTable(tn);
      admin.deleteTable(tn);
    }

    // Create the table
    TableDescriptorBuilder tableDescriptorBuilder = TableDescriptorBuilder.newBuilder(tn);
    ColumnFamilyDescriptor columnFamilyDescriptor =
      ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes(F1)).build();
    tableDescriptorBuilder.setColumnFamily(columnFamilyDescriptor);

    admin.createTable(tableDescriptorBuilder.build(), Bytes.toBytes("1"), Bytes.toBytes("9"),
      NUM_SPLITS);

    // Table is Closeable; try-with-resources guarantees cleanup even if a put fails.
    try (Table table = conn.getTable(tn)) {
      List<Put> updates = new ArrayList<>();
      long bytesToWrite = sizeInBytes;
      long rowKeyId = 0L;
      final StringBuilder sb = new StringBuilder();
      while (bytesToWrite > 0L) {
        sb.setLength(0);
        sb.append(Long.toString(rowKeyId));
        // Use the reverse counter as the rowKey to get even spread across all regions
        Put p = new Put(Bytes.toBytes(sb.reverse().toString()));
        byte[] value = new byte[SIZE_PER_VALUE];
        Bytes.random(value);
        p.addColumn(Bytes.toBytes(F1), Bytes.toBytes("q1"), value);
        updates.add(p);

        // Flush batches of BATCH_SIZE puts (~BATCH_SIZE * SIZE_PER_VALUE bytes of values)
        if (updates.size() >= BATCH_SIZE) {
          table.put(updates);
          updates.clear();
        }

        // Just count the value size, ignore the size of rowkey + column
        bytesToWrite -= SIZE_PER_VALUE;
        rowKeyId++;
      }

      // Write the final batch
      if (!updates.isEmpty()) {
        table.put(updates);
      }

      return tn;
    }
  }

  /**
   * Computes the number of regions for the given table that have a positive size.
   * @param tn The TableName in question
   * @param regions A collection of region sizes
   * @return The number of regions for the given table.
   */
  private int numRegionsForTable(TableName tn, Map<RegionInfo, Long> regions) {
    int sum = 0;
    for (Entry<RegionInfo, Long> entry : regions.entrySet()) {
      // Only count regions of our table whose reported size is already positive.
      if (tn.equals(entry.getKey().getTable()) && 0 < entry.getValue()) {
        sum++;
      }
    }
    return sum;
  }
}