/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.quotas;

import static org.hamcrest.CoreMatchers.instanceOf;
import static org.hamcrest.MatcherAssert.assertThat;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertNull;
import static org.junit.jupiter.api.Assertions.assertTrue;
import static org.junit.jupiter.api.Assertions.fail;

import java.io.IOException;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.concurrent.atomic.AtomicLong;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseTestingUtil;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.RegionInfo;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.master.HMaster;
import org.apache.hadoop.hbase.quotas.policies.DefaultViolationPolicyEnforcement;
import org.apache.hadoop.hbase.regionserver.HRegionServer;
import org.apache.hadoop.hbase.testclassification.MediumTests;
import org.apache.hadoop.hbase.tool.BulkLoadHFiles;
import org.apache.hadoop.hbase.util.Bytes;
import org.junit.jupiter.api.AfterAll;
import org.junit.jupiter.api.BeforeAll;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Tag;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.TestInfo;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

/**
 * Verifies that bulk loads are rejected when a table's space quota is (or would be) violated,
 * and that rejection is atomic: either every HFile in the batch loads or none do.
 */
@Tag(MediumTests.TAG)
public class TestSpaceQuotaOnBulkLoad {

  private static final Logger LOG = LoggerFactory.getLogger(TestSpaceQuotaOnBulkLoad.class);
  private static final HBaseTestingUtil TEST_UTIL = new HBaseTestingUtil();

  // Per-test quota helper; recreated in removeAllQuotas() so each test gets a fresh table name.
  private SpaceQuotaHelperForTests helper;
  private TestInfo testInfo;

  @BeforeAll
  public static void setUp() throws Exception {
    Configuration conf = TEST_UTIL.getConfiguration();
    SpaceQuotaHelperForTests.updateConfigForQuotas(conf);
    TEST_UTIL.startMiniCluster(1);
  }

  @AfterAll
  public static void tearDown() throws Exception {
    TEST_UTIL.shutdownMiniCluster();
  }

  /**
   * Clears any quota state left over from a previous test so tests are independent.
   */
  @BeforeEach
  public void removeAllQuotas(TestInfo testInfo) throws Exception {
    this.testInfo = testInfo;
    helper = new SpaceQuotaHelperForTests(TEST_UTIL, () -> testInfo.getTestMethod().get().getName(),
      new AtomicLong(0));
    helper.removeAllQuotas();
  }

  /**
   * A table already in NO_WRITES violation must reject an incoming bulk load.
   */
  @Test
  public void testNoBulkLoadsWithNoWrites() throws Exception {
    Put p = new Put(Bytes.toBytes("to_reject"));
    p.addColumn(Bytes.toBytes(SpaceQuotaHelperForTests.F1), Bytes.toBytes("to"),
      Bytes.toBytes("reject"));
    // Drive the table into NO_WRITES violation with normal writes first.
    TableName tableName =
      helper.writeUntilViolationAndVerifyViolation(SpaceViolationPolicy.NO_WRITES, p);

    // The table is now in violation. Try to do a bulk load
    Map<byte[], List<Path>> family2Files = helper.generateFileToLoad(tableName, 1, 50);
    assertBulkLoadRejected(tableName, family2Files);
  }

  /**
   * If a batch of HFiles would push a table over its quota, none of the files may be loaded,
   * even when an individual file would have fit on its own.
   */
  @Test
  public void testAtomicBulkLoadUnderQuota() throws Exception {
    // Need to verify that if the batch of hfiles cannot be loaded, none are loaded.
    TableName tn = helper.createTableWithRegions(10);

    final long sizeLimit = 50L * SpaceQuotaHelperForTests.ONE_KILOBYTE;
    QuotaSettings settings =
      QuotaSettingsFactory.limitTableSpace(tn, sizeLimit, SpaceViolationPolicy.NO_INSERTS);
    TEST_UTIL.getAdmin().setQuota(settings);

    // Poll until the region server's quota snapshot reflects the limit we just set.
    HRegionServer rs = TEST_UTIL.getMiniHBaseCluster().getRegionServer(0);
    RegionServerSpaceQuotaManager spaceQuotaManager = rs.getRegionServerSpaceQuotaManager();
    Map<TableName, SpaceQuotaSnapshot> snapshots = spaceQuotaManager.copyQuotaSnapshots();
    Map<RegionInfo, Long> regionSizes = getReportedSizesForTable(tn);
    while (true) {
      SpaceQuotaSnapshot snapshot = snapshots.get(tn);
      if (snapshot != null && snapshot.getLimit() > 0) {
        break;
      }
      LOG.debug("Snapshot does not yet realize quota limit: " + snapshots + ", regionsizes: "
        + regionSizes);
      Thread.sleep(3000);
      snapshots = spaceQuotaManager.copyQuotaSnapshots();
      regionSizes = getReportedSizesForTable(tn);
    }
    // Our quota limit should be reflected in the latest snapshot
    SpaceQuotaSnapshot snapshot = snapshots.get(tn);
    assertEquals(0L, snapshot.getUsage());
    assertEquals(sizeLimit, snapshot.getLimit());

    // We would also not have a "real" policy in violation
    ActivePolicyEnforcement activePolicies = spaceQuotaManager.getActiveEnforcements();
    SpaceViolationPolicyEnforcement enforcement = activePolicies.getPolicyEnforcement(tn);
    assertTrue(enforcement instanceof DefaultViolationPolicyEnforcement,
      "Expected to find Noop policy, but got " + enforcement.getClass().getSimpleName());

    // Should generate two files, each of which is over 25KB; together they exceed the 50KB limit.
    Map<byte[], List<Path>> family2Files = helper.generateFileToLoad(tn, 2, 525);
    FileSystem fs = TEST_UTIL.getTestFileSystem();
    FileStatus[] files = fs.listStatus(
      new Path(fs.getHomeDirectory(), testInfo.getTestMethod().get().getName() + "_files"));
    for (FileStatus file : files) {
      assertTrue(file.getLen() > 25 * SpaceQuotaHelperForTests.ONE_KILOBYTE, "Expected the file, "
        + file.getPath() + ", length to be larger than 25KB, but was " + file.getLen());
      LOG.debug(file.getPath() + " -> " + file.getLen() + "B");
    }

    assertBulkLoadRejected(tn, family2Files);

    // Verify that we have no data in the table because neither file should have been
    // loaded even though one of the files could have.
    // try-with-resources: Table and ResultScanner are both Closeable; the original leaked the
    // Table reference.
    try (Table table = TEST_UTIL.getConnection().getTable(tn);
      ResultScanner scanner = table.getScanner(new Scan())) {
      assertNull(scanner.next(), "Expected no results");
    }
  }

  /**
   * Attempts the given bulk load and asserts that it is rejected by the space quota, i.e. that
   * it fails with an {@link IOException} caused by a {@link SpaceLimitingException}.
   */
  private void assertBulkLoadRejected(TableName tableName, Map<byte[], List<Path>> family2Files)
    throws Exception {
    try {
      BulkLoadHFiles.create(TEST_UTIL.getConfiguration()).bulkLoad(tableName, family2Files);
      fail("Expected the bulk load call to fail!");
    } catch (IOException e) {
      // Pass -- quota enforcement surfaces as a SpaceLimitingException wrapped in an IOException.
      assertThat(e.getCause(), instanceOf(SpaceLimitingException.class));
      LOG.trace("Caught expected exception", e);
    }
  }

  /**
   * Returns the master's reported region sizes, filtered down to regions belonging to {@code tn}.
   */
  private Map<RegionInfo, Long> getReportedSizesForTable(TableName tn) {
    HMaster master = TEST_UTIL.getMiniHBaseCluster().getMaster();
    MasterQuotaManager quotaManager = master.getMasterQuotaManager();
    Map<RegionInfo, Long> filteredRegionSizes = new HashMap<>();
    for (Map.Entry<RegionInfo, Long> entry : quotaManager.snapshotRegionSizes().entrySet()) {
      if (entry.getKey().getTable().equals(tn)) {
        filteredRegionSizes.put(entry.getKey(), entry.getValue());
      }
    }
    return filteredRegionSizes;
  }
}