/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.quotas;

import static org.hamcrest.CoreMatchers.instanceOf;
import static org.hamcrest.MatcherAssert.assertThat;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertNull;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;

import java.io.IOException;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.concurrent.atomic.AtomicLong;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseClassTestRule;
import org.apache.hadoop.hbase.HBaseTestingUtil;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.RegionInfo;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.master.HMaster;
import org.apache.hadoop.hbase.quotas.policies.DefaultViolationPolicyEnforcement;
import org.apache.hadoop.hbase.regionserver.HRegionServer;
import org.apache.hadoop.hbase.testclassification.MediumTests;
import org.apache.hadoop.hbase.tool.BulkLoadHFiles;
import org.apache.hadoop.hbase.util.Bytes;
import org.junit.AfterClass;
import org.junit.Before;
import org.junit.BeforeClass;
import org.junit.ClassRule;
import org.junit.Rule;
import org.junit.Test;
import org.junit.experimental.categories.Category;
import org.junit.rules.TestName;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

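/**
 * Tests that space quota violation policies are enforced against bulk loads: a table already in
 * violation rejects bulk loads outright, and a bulk load that would push a table over its limit
 * is rejected atomically (none of the files in the batch are loaded).
 */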
@Category(MediumTests.class)
public class TestSpaceQuotaOnBulkLoad {

  @ClassRule
  public static final HBaseClassTestRule CLASS_RULE =
    HBaseClassTestRule.forClass(TestSpaceQuotaOnBulkLoad.class);

  private static final Logger LOG = LoggerFactory.getLogger(TestSpaceQuotaOnBulkLoad.class);
  private static final HBaseTestingUtil TEST_UTIL = new HBaseTestingUtil();

  @Rule
  public TestName testName = new TestName();
  private SpaceQuotaHelperForTests helper;

  @BeforeClass
  public static void setUp() throws Exception {
    Configuration conf = TEST_UTIL.getConfiguration();
    SpaceQuotaHelperForTests.updateConfigForQuotas(conf);
    TEST_UTIL.startMiniCluster(1);
  }

  @AfterClass
  public static void tearDown() throws Exception {
    TEST_UTIL.shutdownMiniCluster();
  }

  @Before
  public void removeAllQuotas() throws Exception {
    helper = new SpaceQuotaHelperForTests(TEST_UTIL, testName, new AtomicLong(0));
    helper.removeAllQuotas();
  }

  @Test
  public void testNoBulkLoadsWithNoWrites() throws Exception {
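    // Drive the table into NO_WRITES violation first; the helper verifies that this Put is
    // rejected once the policy takes effect.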
    Put p = new Put(Bytes.toBytes("to_reject"));
    p.addColumn(Bytes.toBytes(SpaceQuotaHelperForTests.F1), Bytes.toBytes("to"),
      Bytes.toBytes("reject"));
    TableName tableName =
      helper.writeUntilViolationAndVerifyViolation(SpaceViolationPolicy.NO_WRITES, p);

    // The table is now in violation. Try to do a bulk load
    Map<byte[], List<Path>> family2Files = helper.generateFileToLoad(tableName, 1, 50);
    try {
      BulkLoadHFiles.create(TEST_UTIL.getConfiguration()).bulkLoad(tableName, family2Files);
      fail("Expected the bulk load call to fail!");
    } catch (IOException e) {
      // Pass
      assertThat(e.getCause(), instanceOf(SpaceLimitingException.class));
      LOG.trace("Caught expected exception", e);
    }
  }

  @Test
  public void testAtomicBulkLoadUnderQuota() throws Exception {
    // Need to verify that if the batch of hfiles cannot be loaded, none are loaded.
    TableName tn = helper.createTableWithRegions(10);

    final long sizeLimit = 50L * SpaceQuotaHelperForTests.ONE_KILOBYTE;
    QuotaSettings settings =
      QuotaSettingsFactory.limitTableSpace(tn, sizeLimit, SpaceViolationPolicy.NO_INSERTS);
    TEST_UTIL.getAdmin().setQuota(settings);

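    // The quota snapshot is computed and pushed to the RegionServers asynchronously, so poll
    // below until the RegionServer-side snapshot reflects the new limit.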
    HRegionServer rs = TEST_UTIL.getMiniHBaseCluster().getRegionServer(0);
    RegionServerSpaceQuotaManager spaceQuotaManager = rs.getRegionServerSpaceQuotaManager();
    Map<TableName, SpaceQuotaSnapshot> snapshots = spaceQuotaManager.copyQuotaSnapshots();
    Map<RegionInfo, Long> regionSizes = getReportedSizesForTable(tn);
    while (true) {
      SpaceQuotaSnapshot snapshot = snapshots.get(tn);
      if (snapshot != null && snapshot.getLimit() > 0) {
        break;
      }
      LOG.debug("Snapshot does not yet realize quota limit: " + snapshots + ", regionsizes: "
        + regionSizes);
      Thread.sleep(3000);
      snapshots = spaceQuotaManager.copyQuotaSnapshots();
      regionSizes = getReportedSizesForTable(tn);
    }
    // Our quota limit should be reflected in the latest snapshot
    SpaceQuotaSnapshot snapshot = snapshots.get(tn);
    assertEquals(0L, snapshot.getUsage());
    assertEquals(sizeLimit, snapshot.getLimit());

    // The table is not yet in violation, so only the default (non-violating) enforcement
    // should be active
    ActivePolicyEnforcement activePolicies = spaceQuotaManager.getActiveEnforcements();
    SpaceViolationPolicyEnforcement enforcement = activePolicies.getPolicyEnforcement(tn);
    assertTrue("Expected to find Noop policy, but got " + enforcement.getClass().getSimpleName(),
      enforcement instanceof DefaultViolationPolicyEnforcement);
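    // Even without a violation, the default enforcement still checks the size of incoming bulk
    // loads against the remaining quota, which is what the rejection below relies on.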

    // Should generate two files, each over 25KB
    Map<byte[], List<Path>> family2Files = helper.generateFileToLoad(tn, 2, 525);
    FileSystem fs = TEST_UTIL.getTestFileSystem();
    FileStatus[] files =
      fs.listStatus(new Path(fs.getHomeDirectory(), testName.getMethodName() + "_files"));
    for (FileStatus file : files) {
      assertTrue("Expected the file " + file.getPath()
        + " to be larger than 25KB, but it was " + file.getLen() + " bytes",
        file.getLen() > 25 * SpaceQuotaHelperForTests.ONE_KILOBYTE);
      LOG.debug(file.getPath() + " -> " + file.getLen() + "B");
    }

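    // Two files of more than 25KB each means the batch as a whole exceeds the 50KB limit, so
    // the bulk load pre-check should reject the entire batch.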
    try {
      BulkLoadHFiles.create(TEST_UTIL.getConfiguration()).bulkLoad(tn, family2Files);
      fail("Expected the bulk load call to fail!");
    } catch (IOException e) {
      // Pass
      assertThat(e.getCause(), instanceOf(SpaceLimitingException.class));
      LOG.trace("Caught expected exception", e);
    }
    // Verify that the table has no data: neither file should have been loaded, even though
    // one of them alone could have fit under the quota.
    try (Table table = TEST_UTIL.getConnection().getTable(tn);
      ResultScanner scanner = table.getScanner(new Scan())) {
      assertNull("Expected no results", scanner.next());
    }
  }

  private Map<RegionInfo, Long> getReportedSizesForTable(TableName tn) {
    HMaster master = TEST_UTIL.getMiniHBaseCluster().getMaster();
    MasterQuotaManager quotaManager = master.getMasterQuotaManager();
    Map<RegionInfo, Long> filteredRegionSizes = new HashMap<>();
    for (Map.Entry<RegionInfo, Long> entry : quotaManager.snapshotRegionSizes().entrySet()) {
      if (entry.getKey().getTable().equals(tn)) {
        filteredRegionSizes.put(entry.getKey(), entry.getValue());
      }
    }
    return filteredRegionSizes;
  }
}