001/* 002 * Licensed to the Apache Software Foundation (ASF) under one 003 * or more contributor license agreements. See the NOTICE file 004 * distributed with this work for additional information 005 * regarding copyright ownership. The ASF licenses this file 006 * to you under the Apache License, Version 2.0 (the 007 * "License"); you may not use this file except in compliance 008 * with the License. You may obtain a copy of the License at 009 * 010 * http://www.apache.org/licenses/LICENSE-2.0 011 * 012 * Unless required by applicable law or agreed to in writing, software 013 * distributed under the License is distributed on an "AS IS" BASIS, 014 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 015 * See the License for the specific language governing permissions and 016 * limitations under the License. 017 */ 018package org.apache.hadoop.hbase.quotas; 019 020import static org.junit.Assert.assertEquals; 021import static org.junit.Assert.assertTrue; 022 023import java.util.Collections; 024import java.util.List; 025import java.util.concurrent.atomic.AtomicLong; 026import java.util.stream.Collectors; 027import org.apache.hadoop.conf.Configuration; 028import org.apache.hadoop.fs.FileStatus; 029import org.apache.hadoop.fs.FileSystem; 030import org.apache.hadoop.fs.Path; 031import org.apache.hadoop.hbase.HBaseClassTestRule; 032import org.apache.hadoop.hbase.HBaseTestingUtility; 033import org.apache.hadoop.hbase.ServerName; 034import org.apache.hadoop.hbase.TableName; 035import org.apache.hadoop.hbase.client.Admin; 036import org.apache.hadoop.hbase.client.ClientServiceCallable; 037import org.apache.hadoop.hbase.client.Connection; 038import org.apache.hadoop.hbase.client.Result; 039import org.apache.hadoop.hbase.client.RpcRetryingCaller; 040import org.apache.hadoop.hbase.client.RpcRetryingCallerFactory; 041import org.apache.hadoop.hbase.client.SnapshotType; 042import org.apache.hadoop.hbase.client.Table; 043import 
org.apache.hadoop.hbase.quotas.SpaceQuotaHelperForTests.SpaceQuotaSnapshotPredicate;
import org.apache.hadoop.hbase.regionserver.HRegion;
import org.apache.hadoop.hbase.regionserver.Region;
import org.apache.hadoop.hbase.regionserver.Store;
import org.apache.hadoop.hbase.testclassification.MediumTests;
import org.junit.AfterClass;
import org.junit.Before;
import org.junit.BeforeClass;
import org.junit.ClassRule;
import org.junit.Rule;
import org.junit.Test;
import org.junit.experimental.categories.Category;
import org.junit.rules.TestName;

import org.apache.hbase.thirdparty.com.google.common.collect.Iterables;

/**
 * Verifies that table space-quota usage is reported with low latency after flushes,
 * compactions, bulk loads and snapshots. The filesystem-scanning chores
 * ({@link FileSystemUtilizationChore}, {@link SnapshotQuotaObserverChore}) are configured
 * with very long periods so that any observed size change must come from the fast,
 * event-driven reporting path rather than from a periodic HDFS scan.
 */
@Category({ MediumTests.class })
public class TestLowLatencySpaceQuotas {

  @ClassRule
  public static final HBaseClassTestRule CLASS_RULE =
    HBaseClassTestRule.forClass(TestLowLatencySpaceQuotas.class);

  private static final HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
  // Global for all tests in the class
  private static final AtomicLong COUNTER = new AtomicLong(0);

  @Rule
  public TestName testName = new TestName();
  private SpaceQuotaHelperForTests helper;
  private Connection conn;
  private Admin admin;

  @BeforeClass
  public static void setup() throws Exception {
    Configuration conf = TEST_UTIL.getConfiguration();
    // The default 1s period for QuotaObserverChore is good.
    SpaceQuotaHelperForTests.updateConfigForQuotas(conf);
    // Set the period/delay to read region size from HDFS to be very long
    conf.setInt(FileSystemUtilizationChore.FS_UTILIZATION_CHORE_PERIOD_KEY, 1000 * 120);
    conf.setInt(FileSystemUtilizationChore.FS_UTILIZATION_CHORE_DELAY_KEY, 1000 * 120);
    // Set the same long period/delay to compute snapshot sizes
    conf.setInt(SnapshotQuotaObserverChore.SNAPSHOT_QUOTA_CHORE_PERIOD_KEY, 1000 * 120);
    conf.setInt(SnapshotQuotaObserverChore.SNAPSHOT_QUOTA_CHORE_DELAY_KEY, 1000 * 120);
    // Clean up the compacted files faster than normal (5s instead of 2mins)
    conf.setInt("hbase.hfile.compaction.discharger.interval", 5 * 1000);

    TEST_UTIL.startMiniCluster(1);
  }

  @AfterClass
  public static void tearDown() throws Exception {
    TEST_UTIL.shutdownMiniCluster();
  }

  @Before
  public void removeAllQuotas() throws Exception {
    helper = new SpaceQuotaHelperForTests(TEST_UTIL, testName, COUNTER);
    conn = TEST_UTIL.getConnection();
    admin = TEST_UTIL.getAdmin();
    helper.waitForQuotaTable(conn);
  }

  /**
   * Applies the standard quota used by every test in this class: a 1GB table space
   * limit with the {@code NO_INSERTS} violation policy.
   */
  private void setDefaultQuota(TableName tn) throws Exception {
    QuotaSettings settings = QuotaSettingsFactory.limitTableSpace(tn,
      SpaceQuotaHelperForTests.ONE_GIGABYTE, SpaceViolationPolicy.NO_INSERTS);
    admin.setQuota(settings);
  }

  @Test
  public void testFlushes() throws Exception {
    TableName tn = helper.createTableWithRegions(1);
    // Set a quota
    setDefaultQuota(tn);

    // Write some data
    final long initialSize = 2L * SpaceQuotaHelperForTests.ONE_MEGABYTE;
    helper.writeData(tn, initialSize);

    // Make sure a flush happened
    admin.flush(tn);

    // We should be able to observe the system recording an increase in size (even
    // though we know the filesystem scanning did not happen).
    TEST_UTIL.waitFor(30 * 1000, 500, new SpaceQuotaSnapshotPredicate(conn, tn) {
      @Override
      boolean evaluate(SpaceQuotaSnapshot snapshot) throws Exception {
        return snapshot.getUsage() >= initialSize;
      }
    });
  }

  @Test
  public void testMajorCompaction() throws Exception {
    TableName tn = helper.createTableWithRegions(1);
    // Set a quota
    setDefaultQuota(tn);

    // Write some data and flush it to disk.
    final long sizePerBatch = 2L * SpaceQuotaHelperForTests.ONE_MEGABYTE;
    helper.writeData(tn, sizePerBatch);
    admin.flush(tn);

    // Write the same data again, flushing it to a second file
    helper.writeData(tn, sizePerBatch);
    admin.flush(tn);

    // After two flushes, both hfiles would contain similar data. We should see 2x the data.
    TEST_UTIL.waitFor(30 * 1000, 500, new SpaceQuotaSnapshotPredicate(conn, tn) {
      @Override
      boolean evaluate(SpaceQuotaSnapshot snapshot) throws Exception {
        return snapshot.getUsage() >= 2L * sizePerBatch;
      }
    });

    // Rewrite the two files into one.
    admin.majorCompact(tn);

    // After we major compact the table, we should notice quickly that the amount of data in the
    // table is much closer to reality (the duplicate entries across the two files are removed).
    TEST_UTIL.waitFor(30 * 1000, 500, new SpaceQuotaSnapshotPredicate(conn, tn) {
      @Override
      boolean evaluate(SpaceQuotaSnapshot snapshot) throws Exception {
        return snapshot.getUsage() >= sizePerBatch && snapshot.getUsage() <= 2L * sizePerBatch;
      }
    });
  }

  @Test
  public void testMinorCompaction() throws Exception {
    TableName tn = helper.createTableWithRegions(1);
    // Set a quota
    setDefaultQuota(tn);

    // Write several batches of similar data, flushing each batch to its own hfile.
    final long sizePerBatch = 2L * SpaceQuotaHelperForTests.ONE_MEGABYTE;
    final long numBatches = 6;
    for (long i = 0; i < numBatches; i++) {
      helper.writeData(tn, sizePerBatch);
      admin.flush(tn);
    }

    HRegion region = Iterables.getOnlyElement(TEST_UTIL.getHBaseCluster().getRegions(tn));
    long numFiles = getNumHFilesForRegion(region);
    assertEquals(numBatches, numFiles);

    // With one hfile per batch, the reported usage should reflect all of the batches.
    TEST_UTIL.waitFor(30 * 1000, 500, new SpaceQuotaSnapshotPredicate(conn, tn) {
      @Override
      boolean evaluate(SpaceQuotaSnapshot snapshot) throws Exception {
        return snapshot.getUsage() >= numFiles * sizePerBatch;
      }
    });

    // Rewrite some files into fewer
    TEST_UTIL.compact(tn, false);
    long numFilesAfterMinorCompaction = getNumHFilesForRegion(region);

    // After the minor compaction, we should notice quickly that the amount of data in the
    // table is much closer to reality (the duplicate entries across the compacted files are
    // removed).
    TEST_UTIL.waitFor(30 * 1000, 500, new SpaceQuotaSnapshotPredicate(conn, tn) {
      @Override
      boolean evaluate(SpaceQuotaSnapshot snapshot) throws Exception {
        return snapshot.getUsage() >= numFilesAfterMinorCompaction * sizePerBatch
          && snapshot.getUsage() <= (numFilesAfterMinorCompaction + 1) * sizePerBatch;
      }
    });
  }

  /** Returns the total number of store files across all stores of the given region. */
  private long getNumHFilesForRegion(HRegion region) {
    return region.getStores().stream().mapToLong(Store::getNumHFiles).sum();
  }

  @Test
  public void testBulkLoading() throws Exception {
    TableName tn = helper.createTableWithRegions(1);
    // Set a quota
    setDefaultQuota(tn);
    // Disable compactions so the bulk-loaded files are not rewritten mid-test.
    admin.compactionSwitch(false,
      admin.getRegionServers().stream().map(ServerName::toString).collect(Collectors.toList()));

    ClientServiceCallable<Void> callable = helper.generateFileToLoad(tn, 3, 550);
    // Make sure the files are about as long as we expect
    FileSystem fs = TEST_UTIL.getTestFileSystem();
    FileStatus[] files =
      fs.listStatus(new Path(fs.getHomeDirectory(), testName.getMethodName() + "_files"));
    long totalSize = 0;
    for (FileStatus file : files) {
      assertTrue("Expected the file, " + file.getPath()
        + ", length to be larger than 25KB, but was " + file.getLen(),
        file.getLen() > 25 * SpaceQuotaHelperForTests.ONE_KILOBYTE);
      totalSize += file.getLen();
    }

    // Perform the bulk load via an RPC to the region server.
    RpcRetryingCallerFactory factory = new RpcRetryingCallerFactory(TEST_UTIL.getConfiguration());
    RpcRetryingCaller<Void> caller = factory.<Void> newCaller();
    caller.callWithRetries(callable, Integer.MAX_VALUE);

    final long finalTotalSize = totalSize;
    try {
      // The usage should grow by at least the size of the loaded files.
      TEST_UTIL.waitFor(30 * 1000, 500, new SpaceQuotaSnapshotPredicate(conn, tn) {
        @Override
        boolean evaluate(SpaceQuotaSnapshot snapshot) throws Exception {
          return snapshot.getUsage() >= finalTotalSize;
        }
      });
    } finally {
      // Re-enable compactions so later tests are unaffected.
      admin.compactionSwitch(true,
        admin.getRegionServers().stream().map(ServerName::toString).collect(Collectors.toList()));
    }
  }

  @Test
  public void testSnapshotSizes() throws Exception {
    TableName tn = helper.createTableWithRegions(1);
    // Set a quota
    setDefaultQuota(tn);

    // Write some data and flush it to disk.
    final long sizePerBatch = 2L * SpaceQuotaHelperForTests.ONE_MEGABYTE;
    helper.writeData(tn, sizePerBatch);
    admin.flush(tn);

    final String snapshot1 = "snapshot1";
    admin.snapshot(snapshot1, tn, SnapshotType.SKIPFLUSH);

    // Compute the size of the file for the Region we'll send to archive
    Region region = Iterables.getOnlyElement(TEST_UTIL.getHBaseCluster().getRegions(tn));
    List<? extends Store> stores = region.getStores();
    long summer = 0;
    for (Store store : stores) {
      summer += store.getStorefilesSize();
    }
    final long storeFileSize = summer;

    // Wait for the table to show the usage
    TEST_UTIL.waitFor(30 * 1000, 500, new SpaceQuotaSnapshotPredicate(conn, tn) {
      @Override
      boolean evaluate(SpaceQuotaSnapshot snapshot) throws Exception {
        return snapshot.getUsage() == storeFileSize;
      }
    });

    // Spoof a "full" computation of snapshot size. Normally the chore handles this, but we want
    // to test in the absence of this chore.
    FileArchiverNotifier notifier = TEST_UTIL.getHBaseCluster().getMaster()
      .getSnapshotQuotaObserverChore().getNotifierForTable(tn);
    notifier.computeAndStoreSnapshotSizes(Collections.singletonList(snapshot1));

    // Force a major compaction to create a new file and push the old file to the archive
    TEST_UTIL.compact(tn, true);

    // After moving the old file to archive/, the space of this table should double
    // We have a new file created by the majc referenced by the table and the snapshot still
    // referencing the old file.
    TEST_UTIL.waitFor(30 * 1000, 500, new SpaceQuotaSnapshotPredicate(conn, tn) {
      @Override
      boolean evaluate(SpaceQuotaSnapshot snapshot) throws Exception {
        return snapshot.getUsage() >= 2 * storeFileSize;
      }
    });

    // The quota table should record the snapshot's size, both per-table and per-namespace.
    try (Table quotaTable = conn.getTable(QuotaUtil.QUOTA_TABLE_NAME)) {
      Result r = quotaTable.get(QuotaTableUtil.makeGetForSnapshotSize(tn, snapshot1));
      assertTrue("Expected a non-null, non-empty Result", r != null && !r.isEmpty());
      assertTrue(r.advance());
      assertEquals("The snapshot's size should be the same as the origin store file", storeFileSize,
        QuotaTableUtil.parseSnapshotSize(r.current()));

      r = quotaTable.get(QuotaTableUtil.createGetNamespaceSnapshotSize(tn.getNamespaceAsString()));
      assertTrue("Expected a non-null, non-empty Result", r != null && !r.isEmpty());
      assertTrue(r.advance());
      assertEquals("The snapshot's size should be the same as the origin store file", storeFileSize,
        QuotaTableUtil.parseSnapshotSize(r.current()));
    }
  }
}