/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.quotas;

import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertNotNull;

import java.io.IOException;
import java.util.Arrays;
import java.util.HashSet;
import java.util.Map;
import java.util.concurrent.atomic.AtomicLong;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.CellScanner;
import org.apache.hadoop.hbase.HBaseClassTestRule;
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.NamespaceDescriptor;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.client.SnapshotDescription;
import org.apache.hadoop.hbase.client.SnapshotType;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.master.HMaster;
import org.apache.hadoop.hbase.quotas.SnapshotQuotaObserverChore.SnapshotWithSize;
import org.apache.hadoop.hbase.quotas.SpaceQuotaHelperForTests.NoFilesToDischarge;
import org.apache.hadoop.hbase.quotas.SpaceQuotaHelperForTests.SpaceQuotaSnapshotPredicate;
import org.apache.hadoop.hbase.regionserver.HStore;
import org.apache.hadoop.hbase.testclassification.MediumTests;
import org.junit.AfterClass;
import org.junit.Before;
import org.junit.BeforeClass;
import org.junit.ClassRule;
import org.junit.Rule;
import org.junit.Test;
import org.junit.experimental.categories.Category;
import org.junit.rules.TestName;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import org.apache.hbase.thirdparty.com.google.common.collect.HashMultimap;
import org.apache.hbase.thirdparty.com.google.common.collect.Iterables;
import org.apache.hbase.thirdparty.com.google.common.collect.Multimap;
/**
 * Test class for the {@link SnapshotQuotaObserverChore}.
 */
@Category(MediumTests.class)
public class TestSnapshotQuotaObserverChore {

  @ClassRule
  public static final HBaseClassTestRule CLASS_RULE =
      HBaseClassTestRule.forClass(TestSnapshotQuotaObserverChore.class);

  private static final Logger LOG = LoggerFactory.getLogger(TestSnapshotQuotaObserverChore.class);
  private static final HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
  private static final AtomicLong COUNTER = new AtomicLong();

  @Rule
  public TestName testName = new TestName();

  private Connection conn;
  private Admin admin;
  private SpaceQuotaHelperForTests helper;
  private HMaster master;
  private SnapshotQuotaObserverChore testChore;

  @BeforeClass
  public static void setUp() throws Exception {
    Configuration conf = TEST_UTIL.getConfiguration();
    SpaceQuotaHelperForTests.updateConfigForQuotas(conf);
    // Clean up the compacted files faster than normal (15s instead of 2mins)
    conf.setInt("hbase.hfile.compaction.discharger.interval", 15 * 1000);
    TEST_UTIL.startMiniCluster(1);
  }

  @AfterClass
  public static void tearDown() throws Exception {
    TEST_UTIL.shutdownMiniCluster();
  }

  @Before
  public void setup() throws Exception {
    conn = TEST_UTIL.getConnection();
    admin = TEST_UTIL.getAdmin();
    helper = new SpaceQuotaHelperForTests(TEST_UTIL, testName, COUNTER);
    master = TEST_UTIL.getHBaseCluster().getMaster();
    helper.removeAllQuotas(conn);
    testChore = new SnapshotQuotaObserverChore(
        TEST_UTIL.getConnection(), TEST_UTIL.getConfiguration(), master.getFileSystem(), master,
        null);
  }

  @Test
  public void testSnapshotSizePersistence() throws IOException {
    final Admin admin = TEST_UTIL.getAdmin();
    final TableName tn = TableName.valueOf("quota_snapshotSizePersistence");
    if (admin.tableExists(tn)) {
      admin.disableTable(tn);
      admin.deleteTable(tn);
    }
    HTableDescriptor desc = new HTableDescriptor(tn);
    desc.addFamily(new HColumnDescriptor(QuotaTableUtil.QUOTA_FAMILY_USAGE));
    admin.createTable(desc);

    Multimap<TableName,SnapshotWithSize> snapshotsWithSizes = HashMultimap.create();
    try (Table table = conn.getTable(tn)) {
      // Writing no values will result in no records written.
      verify(table, () -> {
        testChore.persistSnapshotSizes(table, snapshotsWithSizes);
        assertEquals(0, count(table));
      });

      verify(table, () -> {
        TableName originatingTable = TableName.valueOf("t1");
        snapshotsWithSizes.put(originatingTable, new SnapshotWithSize("ss1", 1024L));
        snapshotsWithSizes.put(originatingTable, new SnapshotWithSize("ss2", 4096L));
        testChore.persistSnapshotSizes(table, snapshotsWithSizes);
        assertEquals(2, count(table));
        assertEquals(1024L, extractSnapshotSize(table, originatingTable, "ss1"));
        assertEquals(4096L, extractSnapshotSize(table, originatingTable, "ss2"));
      });

      snapshotsWithSizes.clear();
      verify(table, () -> {
        snapshotsWithSizes.put(TableName.valueOf("t1"), new SnapshotWithSize("ss1", 1024L));
        snapshotsWithSizes.put(TableName.valueOf("t2"), new SnapshotWithSize("ss2", 4096L));
        snapshotsWithSizes.put(TableName.valueOf("t3"), new SnapshotWithSize("ss3", 8192L));
        testChore.persistSnapshotSizes(table, snapshotsWithSizes);
        assertEquals(3, count(table));
        assertEquals(1024L, extractSnapshotSize(table, TableName.valueOf("t1"), "ss1"));
        assertEquals(4096L, extractSnapshotSize(table, TableName.valueOf("t2"), "ss2"));
        assertEquals(8192L, extractSnapshotSize(table, TableName.valueOf("t3"), "ss3"));
      });
    }
  }

  @Test
  public void testSnapshotsFromTables() throws Exception {
    TableName tn1 = helper.createTableWithRegions(1);
    TableName tn2 = helper.createTableWithRegions(1);
    TableName tn3 = helper.createTableWithRegions(1);

    // Set a space quota on tables 1 and 2 (but not 3)
    admin.setQuota(QuotaSettingsFactory.limitTableSpace(
        tn1, SpaceQuotaHelperForTests.ONE_GIGABYTE, SpaceViolationPolicy.NO_INSERTS));
    admin.setQuota(QuotaSettingsFactory.limitTableSpace(
        tn2, SpaceQuotaHelperForTests.ONE_GIGABYTE, SpaceViolationPolicy.NO_INSERTS));

    // Create snapshots on each table (we didn't write any data, so just skipflush)
    admin.snapshot(new SnapshotDescription(tn1 + "snapshot", tn1, SnapshotType.SKIPFLUSH));
    admin.snapshot(new SnapshotDescription(tn2 + "snapshot", tn2, SnapshotType.SKIPFLUSH));
    admin.snapshot(new SnapshotDescription(tn3 + "snapshot", tn3, SnapshotType.SKIPFLUSH));

    Multimap<TableName,String> mapping = testChore.getSnapshotsToComputeSize();
    assertEquals(2, mapping.size());
    assertEquals(1, mapping.get(tn1).size());
    assertEquals(tn1 + "snapshot", mapping.get(tn1).iterator().next());
    assertEquals(1, mapping.get(tn2).size());
    assertEquals(tn2 + "snapshot", mapping.get(tn2).iterator().next());

    admin.snapshot(new SnapshotDescription(tn2 + "snapshot1", tn2, SnapshotType.SKIPFLUSH));
    admin.snapshot(new SnapshotDescription(tn3 + "snapshot1", tn3, SnapshotType.SKIPFLUSH));

    mapping = testChore.getSnapshotsToComputeSize();
    assertEquals(3, mapping.size());
    assertEquals(1, mapping.get(tn1).size());
    assertEquals(tn1 + "snapshot", mapping.get(tn1).iterator().next());
    assertEquals(2, mapping.get(tn2).size());
    assertEquals(
        new HashSet<String>(Arrays.asList(tn2 + "snapshot", tn2 + "snapshot1")), mapping.get(tn2));
  }

  @Test
  public void testSnapshotsFromNamespaces() throws Exception {
    NamespaceDescriptor ns = NamespaceDescriptor.create("snapshots_from_namespaces").build();
    admin.createNamespace(ns);

    TableName tn1 = helper.createTableWithRegions(ns.getName(), 1);
    TableName tn2 = helper.createTableWithRegions(ns.getName(), 1);
    TableName tn3 = helper.createTableWithRegions(1);

    // Set a space quota on the namespace
    admin.setQuota(QuotaSettingsFactory.limitNamespaceSpace(
        ns.getName(), SpaceQuotaHelperForTests.ONE_GIGABYTE, SpaceViolationPolicy.NO_INSERTS));

    // Create snapshots on each table (we didn't write any data, so just skipflush)
    admin.snapshot(new SnapshotDescription(
        tn1.getQualifierAsString() + "snapshot", tn1, SnapshotType.SKIPFLUSH));
    admin.snapshot(new SnapshotDescription(
        tn2.getQualifierAsString() + "snapshot", tn2, SnapshotType.SKIPFLUSH));
    admin.snapshot(new SnapshotDescription(
        tn3.getQualifierAsString() + "snapshot", tn3, SnapshotType.SKIPFLUSH));

    Multimap<TableName,String> mapping = testChore.getSnapshotsToComputeSize();
    assertEquals(2, mapping.size());
    assertEquals(1, mapping.get(tn1).size());
    assertEquals(tn1.getQualifierAsString() + "snapshot", mapping.get(tn1).iterator().next());
    assertEquals(1, mapping.get(tn2).size());
    assertEquals(tn2.getQualifierAsString() + "snapshot", mapping.get(tn2).iterator().next());

    admin.snapshot(new SnapshotDescription(
        tn2.getQualifierAsString() + "snapshot1", tn2, SnapshotType.SKIPFLUSH));
    admin.snapshot(new SnapshotDescription(
        tn3.getQualifierAsString() + "snapshot2", tn3, SnapshotType.SKIPFLUSH));

    mapping = testChore.getSnapshotsToComputeSize();
    assertEquals(3, mapping.size());
    assertEquals(1, mapping.get(tn1).size());
    assertEquals(tn1.getQualifierAsString() + "snapshot", mapping.get(tn1).iterator().next());
    assertEquals(2, mapping.get(tn2).size());
    assertEquals(
        new HashSet<String>(Arrays.asList(tn2.getQualifierAsString() + "snapshot",
            tn2.getQualifierAsString() + "snapshot1")), mapping.get(tn2));
  }

  @Test
  public void testSnapshotSize() throws Exception {
    // Create a table and set a quota
    TableName tn1 = helper.createTableWithRegions(5);
    admin.setQuota(QuotaSettingsFactory.limitTableSpace(
        tn1, SpaceQuotaHelperForTests.ONE_GIGABYTE, SpaceViolationPolicy.NO_INSERTS));

    // Write some data and flush it
    helper.writeData(tn1, 256L * SpaceQuotaHelperForTests.ONE_KILOBYTE);
    admin.flush(tn1);

    final long snapshotSize = TEST_UTIL.getMiniHBaseCluster().getRegions(tn1).stream()
        .flatMap(r -> r.getStores().stream()).mapToLong(HStore::getHFilesSize).sum();

    // Wait for the Master chore to run to see the usage (with a fudge factor)
    TEST_UTIL.waitFor(30_000, new SpaceQuotaSnapshotPredicate(conn, tn1) {
      @Override
      boolean evaluate(SpaceQuotaSnapshot snapshot) throws Exception {
        return snapshot.getUsage() == snapshotSize;
      }
    });

    // Create a snapshot on the table
    final String snapshotName = tn1 + "snapshot";
    admin.snapshot(new SnapshotDescription(snapshotName, tn1, SnapshotType.SKIPFLUSH));

    // Get the snapshots
    Multimap<TableName,String> snapshotsToCompute = testChore.getSnapshotsToComputeSize();
    assertEquals(
        "Expected to see the single snapshot: " + snapshotsToCompute, 1, snapshotsToCompute.size());

    // Get the size of our snapshot
    Multimap<TableName,SnapshotWithSize> snapshotsWithSize = testChore.computeSnapshotSizes(
        snapshotsToCompute);
    assertEquals(1, snapshotsWithSize.size());
    SnapshotWithSize sws = Iterables.getOnlyElement(snapshotsWithSize.get(tn1));
    assertEquals(snapshotName, sws.getName());
    // The snapshot should take up no space since the table refers to it completely
    assertEquals(0, sws.getSize());

    // Write some more data, flush it, and then major_compact the table
    helper.writeData(tn1, 256L * SpaceQuotaHelperForTests.ONE_KILOBYTE);
    admin.flush(tn1);
    TEST_UTIL.compact(tn1, true);

    // Test table should reflect its original size since ingest was deterministic
    TEST_UTIL.waitFor(30_000, new SpaceQuotaSnapshotPredicate(conn, tn1) {
      private final long regionSize = TEST_UTIL.getMiniHBaseCluster().getRegions(tn1).stream()
          .flatMap(r -> r.getStores().stream()).mapToLong(HStore::getHFilesSize).sum();

      @Override
      boolean evaluate(SpaceQuotaSnapshot snapshot) throws Exception {
        LOG.debug("Current usage=" + snapshot.getUsage() + " snapshotSize=" + snapshotSize);
        // The usage of table space consists of region size and snapshot size
        return closeInSize(snapshot.getUsage(), snapshotSize + regionSize,
            SpaceQuotaHelperForTests.ONE_KILOBYTE);
      }
    });

    // Wait for no compacted files on the regions of our table
    TEST_UTIL.waitFor(30_000, new NoFilesToDischarge(TEST_UTIL.getMiniHBaseCluster(), tn1));

    // Still should see only one snapshot
    snapshotsToCompute = testChore.getSnapshotsToComputeSize();
    assertEquals(
        "Expected to see the single snapshot: " + snapshotsToCompute, 1, snapshotsToCompute.size());
    snapshotsWithSize = testChore.computeSnapshotSizes(snapshotsToCompute);
    assertEquals(1, snapshotsWithSize.size());
    sws = Iterables.getOnlyElement(snapshotsWithSize.get(tn1));
    assertEquals(snapshotName, sws.getName());
    // The snapshot should take up the size the table originally took up
    assertEquals(snapshotSize, sws.getSize());
  }

  @Test
  public void testPersistingSnapshotsForNamespaces() throws Exception {
    Multimap<TableName,SnapshotWithSize> snapshotsWithSizes = HashMultimap.create();
    TableName tn1 = TableName.valueOf("ns1:tn1");
    TableName tn2 = TableName.valueOf("ns1:tn2");
    TableName tn3 = TableName.valueOf("ns2:tn1");
    TableName tn4 = TableName.valueOf("ns2:tn2");
    TableName tn5 = TableName.valueOf("tn1");

    snapshotsWithSizes.put(tn1, new SnapshotWithSize("", 1024L));
    snapshotsWithSizes.put(tn2, new SnapshotWithSize("", 1024L));
    snapshotsWithSizes.put(tn3, new SnapshotWithSize("", 512L));
    snapshotsWithSizes.put(tn4, new SnapshotWithSize("", 1024L));
    snapshotsWithSizes.put(tn5, new SnapshotWithSize("", 3072L));

    Map<String,Long> nsSizes = testChore.groupSnapshotSizesByNamespace(snapshotsWithSizes);
    assertEquals(3, nsSizes.size());
    assertEquals(2048L, (long) nsSizes.get("ns1"));
    assertEquals(1536L, (long) nsSizes.get("ns2"));
    assertEquals(3072L, (long) nsSizes.get(NamespaceDescriptor.DEFAULT_NAMESPACE_NAME_STR));
  }

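  /**
   * Counts the total number of cells in the given table by scanning every row.
   */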
  private long count(Table t) throws IOException {
    try (ResultScanner rs = t.getScanner(new Scan())) {
      long sum = 0;
      for (Result r : rs) {
        while (r.advance()) {
          sum++;
        }
      }
      return sum;
    }
  }

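  /**
   * Reads back the size persisted in the quota table for the given snapshot of {@code tn}.
   */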
  private long extractSnapshotSize(
      Table quotaTable, TableName tn, String snapshot) throws IOException {
    Get g = QuotaTableUtil.makeGetForSnapshotSize(tn, snapshot);
    Result r = quotaTable.get(g);
    assertNotNull(r);
    CellScanner cs = r.cellScanner();
    cs.advance();
    Cell c = cs.current();
    assertNotNull(c);
    return QuotaTableUtil.extractSnapshotSize(
        c.getValueArray(), c.getValueOffset(), c.getValueLength());
  }

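  /**
   * Truncates the given table so the check starts from an empty table, then runs {@code test}.
   */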
  private void verify(Table t, IOThrowingRunnable test) throws IOException {
    admin.disableTable(t.getName());
    admin.truncateTable(t.getName(), false);
    test.run();
  }

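  /**
   * A {@link Runnable} variant whose {@code run} method may throw an {@link IOException}.
   */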
  @FunctionalInterface
  private interface IOThrowingRunnable {
    void run() throws IOException;
  }

  /**
   * Computes if {@code size2} is within {@code delta} of {@code size1}, inclusive.
   */
  boolean closeInSize(long size1, long size2, long delta) {
    long lower = size1 - delta;
    long upper = size1 + delta;
    return lower <= size2 && size2 <= upper;
  }
}