/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.quotas;

import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertNull;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;

import java.util.HashMap;
import java.util.Map;
import java.util.concurrent.atomic.AtomicLong;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseClassTestRule;
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ClientServiceCallable;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.RegionInfo;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.RpcRetryingCaller;
import org.apache.hadoop.hbase.client.RpcRetryingCallerFactory;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.master.HMaster;
import org.apache.hadoop.hbase.quotas.policies.DefaultViolationPolicyEnforcement;
import org.apache.hadoop.hbase.regionserver.HRegionServer;
import org.apache.hadoop.hbase.testclassification.MediumTests;
import org.apache.hadoop.hbase.util.Bytes;
import org.junit.AfterClass;
import org.junit.Before;
import org.junit.BeforeClass;
import org.junit.ClassRule;
import org.junit.Rule;
import org.junit.Test;
import org.junit.experimental.categories.Category;
import org.junit.rules.TestName;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

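/**
 * End-to-end tests that space quota violation policies are enforced when data is written to a
 * table via bulk load.
 */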
@Category(MediumTests.class)
public class TestSpaceQuotaOnBulkLoad {

  @ClassRule
  public static final HBaseClassTestRule CLASS_RULE =
    HBaseClassTestRule.forClass(TestSpaceQuotaOnBulkLoad.class);

  private static final Logger LOG = LoggerFactory.getLogger(TestSpaceQuotaOnBulkLoad.class);
  private static final HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();

  @Rule
  public TestName testName = new TestName();
  private SpaceQuotaHelperForTests helper;

  @BeforeClass
  public static void setUp() throws Exception {
    Configuration conf = TEST_UTIL.getConfiguration();
    SpaceQuotaHelperForTests.updateConfigForQuotas(conf);
    TEST_UTIL.startMiniCluster(1);
  }

  @AfterClass
  public static void tearDown() throws Exception {
    TEST_UTIL.shutdownMiniCluster();
  }

  @Before
  public void removeAllQuotas() throws Exception {
    helper = new SpaceQuotaHelperForTests(TEST_UTIL, testName, new AtomicLong(0));
    helper.removeAllQuotas();
  }

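  /**
   * Writes to a table until it violates a {@code NO_WRITES} space quota and then verifies that a
   * subsequent bulk load into that table is rejected.
   */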
  @Test
  public void testNoBulkLoadsWithNoWrites() throws Exception {
    Put p = new Put(Bytes.toBytes("to_reject"));
    p.addColumn(Bytes.toBytes(SpaceQuotaHelperForTests.F1), Bytes.toBytes("to"),
      Bytes.toBytes("reject"));
    TableName tableName =
      helper.writeUntilViolationAndVerifyViolation(SpaceViolationPolicy.NO_WRITES, p);

    // The table is now in violation. Try to do a bulk load
    ClientServiceCallable<Void> callable = helper.generateFileToLoad(tableName, 1, 50);
    RpcRetryingCallerFactory factory = new RpcRetryingCallerFactory(TEST_UTIL.getConfiguration());
    RpcRetryingCaller<Void> caller = factory.<Void> newCaller();
    try {
      caller.callWithRetries(callable, Integer.MAX_VALUE);
      fail("Expected the bulk load call to fail!");
    } catch (SpaceLimitingException e) {
      // Pass
      LOG.trace("Caught expected exception", e);
    }
  }

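  /**
   * Verifies that bulk loads are applied atomically with respect to space quotas: if the combined
   * size of the HFiles in a batch would exceed the table's quota, none of the files are loaded,
   * even though one file on its own could have been.
   */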
  @Test
  public void testAtomicBulkLoadUnderQuota() throws Exception {
    // Need to verify that if the batch of hfiles cannot be loaded, none are loaded.
    TableName tn = helper.createTableWithRegions(10);

    final long sizeLimit = 50L * SpaceQuotaHelperForTests.ONE_KILOBYTE;
    QuotaSettings settings =
      QuotaSettingsFactory.limitTableSpace(tn, sizeLimit, SpaceViolationPolicy.NO_INSERTS);
    TEST_UTIL.getAdmin().setQuota(settings);

    HRegionServer rs = TEST_UTIL.getMiniHBaseCluster().getRegionServer(0);
    RegionServerSpaceQuotaManager spaceQuotaManager = rs.getRegionServerSpaceQuotaManager();
    Map<TableName, SpaceQuotaSnapshot> snapshots = spaceQuotaManager.copyQuotaSnapshots();
    Map<RegionInfo, Long> regionSizes = getReportedSizesForTable(tn);
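    // Poll until the RegionServer's quota snapshot reflects the limit that was just set.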
    while (true) {
      SpaceQuotaSnapshot snapshot = snapshots.get(tn);
      if (snapshot != null && snapshot.getLimit() > 0) {
        break;
      }
      LOG.debug("Snapshot does not yet realize quota limit: " + snapshots + ", regionsizes: "
        + regionSizes);
      Thread.sleep(3000);
      snapshots = spaceQuotaManager.copyQuotaSnapshots();
      regionSizes = getReportedSizesForTable(tn);
    }
    // Our quota limit should be reflected in the latest snapshot
    SpaceQuotaSnapshot snapshot = snapshots.get(tn);
    assertEquals(0L, snapshot.getUsage());
    assertEquals(sizeLimit, snapshot.getLimit());

    // We should also not have a "real" policy in violation
    ActivePolicyEnforcement activePolicies = spaceQuotaManager.getActiveEnforcements();
    SpaceViolationPolicyEnforcement enforcement = activePolicies.getPolicyEnforcement(tn);
    assertTrue("Expected to find Noop policy, but got " + enforcement.getClass().getSimpleName(),
      enforcement instanceof DefaultViolationPolicyEnforcement);

    // Should generate two files, each of which is over 25KB
    ClientServiceCallable<Void> callable = helper.generateFileToLoad(tn, 2, 500);
    FileSystem fs = TEST_UTIL.getTestFileSystem();
    FileStatus[] files =
      fs.listStatus(new Path(fs.getHomeDirectory(), testName.getMethodName() + "_files"));
    for (FileStatus file : files) {
      assertTrue("Expected the file " + file.getPath() + " to be larger than 25KB, but was "
        + file.getLen(), file.getLen() > 25 * SpaceQuotaHelperForTests.ONE_KILOBYTE);
      LOG.debug(file.getPath() + " -> " + file.getLen() + "B");
    }

    RpcRetryingCallerFactory factory = new RpcRetryingCallerFactory(TEST_UTIL.getConfiguration());
    RpcRetryingCaller<Void> caller = factory.<Void> newCaller();
    try {
      caller.callWithRetries(callable, Integer.MAX_VALUE);
      fail("Expected the bulk load call to fail!");
    } catch (SpaceLimitingException e) {
      // Pass
      LOG.trace("Caught expected exception", e);
    }
    // Verify that the table has no data: neither file should have been loaded, even though one
    // of them could have been on its own.
    try (Table table = TEST_UTIL.getConnection().getTable(tn);
      ResultScanner scanner = table.getScanner(new Scan())) {
      assertNull("Expected no results", scanner.next());
    }
  }

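  /**
   * Returns the region sizes reported to the master's quota manager, filtered down to the regions
   * of the given table.
   */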
  private Map<RegionInfo, Long> getReportedSizesForTable(TableName tn) {
    HMaster master = TEST_UTIL.getMiniHBaseCluster().getMaster();
    MasterQuotaManager quotaManager = master.getMasterQuotaManager();
    Map<RegionInfo, Long> filteredRegionSizes = new HashMap<>();
    for (Map.Entry<RegionInfo, Long> entry : quotaManager.snapshotRegionSizes().entrySet()) {
      if (entry.getKey().getTable().equals(tn)) {
        filteredRegionSizes.put(entry.getKey(), entry.getValue());
      }
    }
    return filteredRegionSizes;
  }
}