/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.quotas;

import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertTrue;

import java.io.IOException;
import java.util.Arrays;
import java.util.Collection;
import java.util.HashSet;
import java.util.Map;
import java.util.Map.Entry;
import java.util.Set;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicLong;
import java.util.concurrent.atomic.AtomicReference;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.HBaseClassTestRule;
import org.apache.hadoop.hbase.HBaseTestingUtil;
import org.apache.hadoop.hbase.NamespaceDescriptor;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.Waiter.Predicate;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.SnapshotDescription;
import org.apache.hadoop.hbase.client.SnapshotType;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.master.HMaster;
import org.apache.hadoop.hbase.quotas.SpaceQuotaHelperForTests.NoFilesToDischarge;
import org.apache.hadoop.hbase.quotas.SpaceQuotaHelperForTests.SpaceQuotaSnapshotPredicate;
import org.apache.hadoop.hbase.regionserver.HStore;
import org.apache.hadoop.hbase.snapshot.SnapshotReferenceUtil;
import org.apache.hadoop.hbase.snapshot.SnapshotReferenceUtil.SnapshotVisitor;
import org.apache.hadoop.hbase.testclassification.LargeTests;
import org.junit.AfterClass;
import org.junit.Before;
import org.junit.BeforeClass;
import org.junit.ClassRule;
import org.junit.Rule;
import org.junit.Test;
import org.junit.experimental.categories.Category;
import org.junit.rules.TestName;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import org.apache.hbase.thirdparty.com.google.common.collect.HashMultimap;
import org.apache.hbase.thirdparty.com.google.common.collect.ImmutableMap;
import org.apache.hbase.thirdparty.com.google.common.collect.Multimap;

/**
 * Test class for the {@link SnapshotQuotaObserverChore}.
 */
@Category(LargeTests.class)
public class TestSnapshotQuotaObserverChore {

  @ClassRule
  public static final HBaseClassTestRule CLASS_RULE =
      HBaseClassTestRule.forClass(TestSnapshotQuotaObserverChore.class);

  private static final Logger LOG = LoggerFactory.getLogger(TestSnapshotQuotaObserverChore.class);
  private static final HBaseTestingUtil TEST_UTIL = new HBaseTestingUtil();
  private static final AtomicLong COUNTER = new AtomicLong();

  @Rule
  public TestName testName = new TestName();

  private Connection conn;
  private Admin admin;
  private SpaceQuotaHelperForTests helper;
  private HMaster master;
  private SnapshotQuotaObserverChore testChore;

  @BeforeClass
  public static void setUp() throws Exception {
    Configuration conf = TEST_UTIL.getConfiguration();
    SpaceQuotaHelperForTests.updateConfigForQuotas(conf);
    // Clean up the compacted files faster than normal (15s instead of 2mins)
    conf.setInt("hbase.hfile.compaction.discharger.interval", 15 * 1000);
    TEST_UTIL.startMiniCluster(1);
  }

  @AfterClass
  public static void tearDown() throws Exception {
    TEST_UTIL.shutdownMiniCluster();
  }

  @Before
  public void setup() throws Exception {
    conn = TEST_UTIL.getConnection();
    admin = TEST_UTIL.getAdmin();
    helper = new SpaceQuotaHelperForTests(TEST_UTIL, testName, COUNTER);
    master = TEST_UTIL.getHBaseCluster().getMaster();
    helper.removeAllQuotas(conn);
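    // Construct the chore directly so each test can drive its size computations synchronously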
    testChore = new SnapshotQuotaObserverChore(
        TEST_UTIL.getConnection(), TEST_UTIL.getConfiguration(), master.getFileSystem(), master,
        null);
  }

  @Test
  public void testSnapshotsFromTables() throws Exception {
    TableName tn1 = helper.createTableWithRegions(1);
    TableName tn2 = helper.createTableWithRegions(1);
    TableName tn3 = helper.createTableWithRegions(1);

    // Set a space quota on table 1 and 2 (but not 3)
    admin.setQuota(QuotaSettingsFactory.limitTableSpace(
        tn1, SpaceQuotaHelperForTests.ONE_GIGABYTE, SpaceViolationPolicy.NO_INSERTS));
    admin.setQuota(QuotaSettingsFactory.limitTableSpace(
        tn2, SpaceQuotaHelperForTests.ONE_GIGABYTE, SpaceViolationPolicy.NO_INSERTS));

    // Create snapshots on each table (we didn't write any data, so just skipflush)
    admin.snapshot(new SnapshotDescription(tn1 + "snapshot", tn1, SnapshotType.SKIPFLUSH));
    admin.snapshot(new SnapshotDescription(tn2 + "snapshot", tn2, SnapshotType.SKIPFLUSH));
    admin.snapshot(new SnapshotDescription(tn3 + "snapshot", tn3, SnapshotType.SKIPFLUSH));

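    // Only snapshots on tables with a space quota (tn1 and tn2) should be returned; tn3 has no quota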
    Multimap<TableName,String> mapping = testChore.getSnapshotsToComputeSize();
    assertEquals(2, mapping.size());
    assertEquals(1, mapping.get(tn1).size());
    assertEquals(tn1 + "snapshot", mapping.get(tn1).iterator().next());
    assertEquals(1, mapping.get(tn2).size());
    assertEquals(tn2 + "snapshot", mapping.get(tn2).iterator().next());

    admin.snapshot(new SnapshotDescription(tn2 + "snapshot1", tn2, SnapshotType.SKIPFLUSH));
    admin.snapshot(new SnapshotDescription(tn3 + "snapshot1", tn3, SnapshotType.SKIPFLUSH));

    mapping = testChore.getSnapshotsToComputeSize();
    assertEquals(3, mapping.size());
    assertEquals(1, mapping.get(tn1).size());
    assertEquals(tn1 + "snapshot", mapping.get(tn1).iterator().next());
    assertEquals(2, mapping.get(tn2).size());
    assertEquals(
        new HashSet<String>(Arrays.asList(tn2 + "snapshot", tn2 + "snapshot1")), mapping.get(tn2));
  }

  @Test
  public void testSnapshotsFromNamespaces() throws Exception {
    NamespaceDescriptor ns = NamespaceDescriptor.create("snapshots_from_namespaces").build();
    admin.createNamespace(ns);

    TableName tn1 = helper.createTableWithRegions(ns.getName(), 1);
    TableName tn2 = helper.createTableWithRegions(ns.getName(), 1);
    TableName tn3 = helper.createTableWithRegions(1);

    // Set a throttle quota on 'default' namespace
    admin.setQuota(QuotaSettingsFactory.throttleNamespace(tn3.getNamespaceAsString(),
      ThrottleType.WRITE_NUMBER, 100, TimeUnit.SECONDS));
    // Set a user throttle quota
    admin.setQuota(
      QuotaSettingsFactory.throttleUser("user", ThrottleType.WRITE_NUMBER, 100, TimeUnit.MINUTES));

    // Set a space quota on the namespace
    admin.setQuota(QuotaSettingsFactory.limitNamespaceSpace(
        ns.getName(), SpaceQuotaHelperForTests.ONE_GIGABYTE, SpaceViolationPolicy.NO_INSERTS));

    // Create snapshots on each table (we didn't write any data, so just skipflush)
    admin.snapshot(new SnapshotDescription(
        tn1.getQualifierAsString() + "snapshot", tn1, SnapshotType.SKIPFLUSH));
    admin.snapshot(new SnapshotDescription(
        tn2.getQualifierAsString() + "snapshot", tn2, SnapshotType.SKIPFLUSH));
    admin.snapshot(new SnapshotDescription(
        tn3.getQualifierAsString() + "snapshot", tn3, SnapshotType.SKIPFLUSH));

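    // Only snapshots on tables in the space-quota'd namespace (tn1 and tn2) should be returned;
    // the throttle quotas on the default namespace and on the user do not count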
    Multimap<TableName,String> mapping = testChore.getSnapshotsToComputeSize();
    assertEquals(2, mapping.size());
    assertEquals(1, mapping.get(tn1).size());
    assertEquals(tn1.getQualifierAsString() + "snapshot", mapping.get(tn1).iterator().next());
    assertEquals(1, mapping.get(tn2).size());
    assertEquals(tn2.getQualifierAsString() + "snapshot", mapping.get(tn2).iterator().next());

    admin.snapshot(new SnapshotDescription(
        tn2.getQualifierAsString() + "snapshot1", tn2, SnapshotType.SKIPFLUSH));
    admin.snapshot(new SnapshotDescription(
        tn3.getQualifierAsString() + "snapshot2", tn3, SnapshotType.SKIPFLUSH));

    mapping = testChore.getSnapshotsToComputeSize();
    assertEquals(3, mapping.size());
    assertEquals(1, mapping.get(tn1).size());
    assertEquals(tn1.getQualifierAsString() + "snapshot", mapping.get(tn1).iterator().next());
    assertEquals(2, mapping.get(tn2).size());
    assertEquals(
        new HashSet<String>(Arrays.asList(tn2.getQualifierAsString() + "snapshot",
            tn2.getQualifierAsString() + "snapshot1")), mapping.get(tn2));
  }

  @Test
  public void testSnapshotSize() throws Exception {
    // Create a table and set a quota
    TableName tn1 = helper.createTableWithRegions(5);
    admin.setQuota(QuotaSettingsFactory.limitTableSpace(
        tn1, SpaceQuotaHelperForTests.ONE_GIGABYTE, SpaceViolationPolicy.NO_INSERTS));

    // Write some data and flush it
    helper.writeData(tn1, 256L * SpaceQuotaHelperForTests.ONE_KILOBYTE);
    admin.flush(tn1);

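    // Record the current store file size; once the snapshot is taken and the table is rewritten
    // by a major compaction, this becomes the expected size of the snapshot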
    final long snapshotSize = TEST_UTIL.getMiniHBaseCluster().getRegions(tn1).stream()
        .flatMap(r -> r.getStores().stream()).mapToLong(HStore::getHFilesSize).sum();

    // Wait for the Master chore to run to see the usage (with a fudge factor)
    TEST_UTIL.waitFor(30_000, new SpaceQuotaSnapshotPredicate(conn, tn1) {
      @Override
      boolean evaluate(SpaceQuotaSnapshot snapshot) throws Exception {
        return snapshot.getUsage() == snapshotSize;
      }
    });

    // Create a snapshot on the table
    final String snapshotName = tn1 + "snapshot";
    admin.snapshot(new SnapshotDescription(snapshotName, tn1, SnapshotType.SKIPFLUSH));

    // Get the snapshots
    Multimap<TableName,String> snapshotsToCompute = testChore.getSnapshotsToComputeSize();
    assertEquals(
        "Expected to see the single snapshot: " + snapshotsToCompute, 1, snapshotsToCompute.size());

    // Get the size of our snapshot
    Map<String,Long> namespaceSnapshotSizes = testChore.computeSnapshotSizes(
        snapshotsToCompute);
    assertEquals(1, namespaceSnapshotSizes.size());
    Long size = namespaceSnapshotSizes.get(tn1.getNamespaceAsString());
    assertNotNull(size);
    // The snapshot should take up no space since the table refers to it completely
    assertEquals(0, size.longValue());

    // Write some more data, flush it, and then major_compact the table
    helper.writeData(tn1, 256L * SpaceQuotaHelperForTests.ONE_KILOBYTE);
    admin.flush(tn1);
    TEST_UTIL.compact(tn1, true);

    // Test table should reflect its original size since ingest was deterministic
    TEST_UTIL.waitFor(30_000, new SpaceQuotaSnapshotPredicate(conn, tn1) {
      private final long regionSize = TEST_UTIL.getMiniHBaseCluster().getRegions(tn1).stream()
          .flatMap(r -> r.getStores().stream()).mapToLong(HStore::getHFilesSize).sum();

      @Override
      boolean evaluate(SpaceQuotaSnapshot snapshot) throws Exception {
        LOG.debug("Current usage=" + snapshot.getUsage() + " snapshotSize=" + snapshotSize);
        // The usage of table space consists of region size and snapshot size
        return closeInSize(snapshot.getUsage(), snapshotSize + regionSize,
            SpaceQuotaHelperForTests.ONE_KILOBYTE);
      }
    });

    // Wait for no compacted files on the regions of our table
    TEST_UTIL.waitFor(30_000, new NoFilesToDischarge(TEST_UTIL.getMiniHBaseCluster(), tn1));

    // Still should see only one snapshot
    snapshotsToCompute = testChore.getSnapshotsToComputeSize();
    assertEquals(
        "Expected to see the single snapshot: " + snapshotsToCompute, 1, snapshotsToCompute.size());
    namespaceSnapshotSizes = testChore.computeSnapshotSizes(
        snapshotsToCompute);
    assertEquals(1, namespaceSnapshotSizes.size());
    size = namespaceSnapshotSizes.get(tn1.getNamespaceAsString());
    assertNotNull(size);
    // The snapshot should take up the size the table originally took up
    assertEquals(snapshotSize, size.longValue());
  }

  @Test
  public void testPersistingSnapshotsForNamespaces() throws Exception {
    TableName tn1 = TableName.valueOf("ns1:tn1");
    TableName tn2 = TableName.valueOf("ns1:tn2");
    TableName tn3 = TableName.valueOf("ns2:tn1");
    TableName tn4 = TableName.valueOf("ns2:tn2");
    TableName tn5 = TableName.valueOf("tn1");
    // Shim in a custom factory to avoid computing snapshot sizes.
    FileArchiverNotifierFactory test = new FileArchiverNotifierFactory() {
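      // Canned per-table snapshot sizes; the chore is expected to roll these up per namespace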
      Map<TableName,Long> tableToSize = ImmutableMap.of(
          tn1, 1024L, tn2, 1024L, tn3, 512L, tn4, 1024L, tn5, 3072L);
      @Override
      public FileArchiverNotifier get(
          Connection conn, Configuration conf, FileSystem fs, TableName tn) {
        return new FileArchiverNotifier() {
          @Override public void addArchivedFiles(Set<Entry<String,Long>> fileSizes)
              throws IOException {}

          @Override
          public long computeAndStoreSnapshotSizes(Collection<String> currentSnapshots)
              throws IOException {
            return tableToSize.get(tn);
          }
        };
      }
    };
    try {
      FileArchiverNotifierFactoryImpl.setInstance(test);

      Multimap<TableName,String> snapshotsToCompute = HashMultimap.create();
      snapshotsToCompute.put(tn1, "");
      snapshotsToCompute.put(tn2, "");
      snapshotsToCompute.put(tn3, "");
      snapshotsToCompute.put(tn4, "");
      snapshotsToCompute.put(tn5, "");
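      // Expect per-namespace totals: ns1 = 1024 + 1024, ns2 = 512 + 1024, default = 3072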
      Map<String,Long> nsSizes = testChore.computeSnapshotSizes(snapshotsToCompute);
      assertEquals(3, nsSizes.size());
      assertEquals(2048L, (long) nsSizes.get("ns1"));
      assertEquals(1536L, (long) nsSizes.get("ns2"));
      assertEquals(3072L, (long) nsSizes.get(NamespaceDescriptor.DEFAULT_NAMESPACE_NAME_STR));
    } finally {
      FileArchiverNotifierFactoryImpl.reset();
    }
  }

  @Test
  public void testRemovedSnapshots() throws Exception {
    // Create a table and set a quota
    TableName tn1 = helper.createTableWithRegions(1);
    admin.setQuota(QuotaSettingsFactory.limitTableSpace(tn1, SpaceQuotaHelperForTests.ONE_GIGABYTE,
        SpaceViolationPolicy.NO_INSERTS));

    // Write some data and flush it
    helper.writeData(tn1, 256L * SpaceQuotaHelperForTests.ONE_KILOBYTE); // 256 KB

    final AtomicReference<Long> lastSeenSize = new AtomicReference<>();
    // Wait for the Master chore to run to see the usage (with a fudge factor)
    TEST_UTIL.waitFor(30_000, new SpaceQuotaSnapshotPredicate(conn, tn1) {
      @Override
      boolean evaluate(SpaceQuotaSnapshot snapshot) throws Exception {
        lastSeenSize.set(snapshot.getUsage());
        return snapshot.getUsage() > 230L * SpaceQuotaHelperForTests.ONE_KILOBYTE;
      }
    });

    // Create a snapshot on the table
    final String snapshotName1 = tn1 + "snapshot1";
    admin.snapshot(new SnapshotDescription(snapshotName1, tn1, SnapshotType.SKIPFLUSH));

    // Snapshot size has to be 0 as the snapshot shares the data with the table
    final Table quotaTable = conn.getTable(QuotaUtil.QUOTA_TABLE_NAME);
    TEST_UTIL.waitFor(30_000, new Predicate<Exception>() {
      @Override
      public boolean evaluate() throws Exception {
        Get g = QuotaTableUtil.makeGetForSnapshotSize(tn1, snapshotName1);
        Result r = quotaTable.get(g);
        if (r == null || r.isEmpty()) {
          return false;
        }
        r.advance();
        Cell c = r.current();
        return QuotaTableUtil.parseSnapshotSize(c) == 0;
      }
    });
    // Total usage has to remain the same as what we saw before taking a snapshot
    TEST_UTIL.waitFor(30_000, new SpaceQuotaSnapshotPredicate(conn, tn1) {
      @Override
      boolean evaluate(SpaceQuotaSnapshot snapshot) throws Exception {
        return snapshot.getUsage() == lastSeenSize.get();
      }
    });

    // Major compact the table to force a rewrite
    TEST_UTIL.compact(tn1, true);
    // Now the snapshot size has to match the prev total size
    TEST_UTIL.waitFor(30_000, new Predicate<Exception>() {
      @Override
      public boolean evaluate() throws Exception {
        Get g = QuotaTableUtil.makeGetForSnapshotSize(tn1, snapshotName1);
        Result r = quotaTable.get(g);
        if (r == null || r.isEmpty()) {
          return false;
        }
        r.advance();
        Cell c = r.current();
        // The compaction result file has an additional compaction event tracker
        return lastSeenSize.get() == QuotaTableUtil.parseSnapshotSize(c);
      }
    });
    // The total size now has to be equal to or more than double the prev total size,
    // as double the number of store files exist now.
    final AtomicReference<Long> sizeAfterCompaction = new AtomicReference<>();
    TEST_UTIL.waitFor(30_000, new SpaceQuotaSnapshotPredicate(conn, tn1) {
      @Override
      boolean evaluate(SpaceQuotaSnapshot snapshot) throws Exception {
        sizeAfterCompaction.set(snapshot.getUsage());
        return snapshot.getUsage() >= 2 * lastSeenSize.get();
      }
    });

    // Delete the snapshot
    admin.deleteSnapshot(snapshotName1);
    // Total size has to come down to prev total size - snapshot size (which was removed)
    TEST_UTIL.waitFor(30_000, new SpaceQuotaSnapshotPredicate(conn, tn1) {
      @Override
      boolean evaluate(SpaceQuotaSnapshot snapshot) throws Exception {
        return snapshot.getUsage() == (sizeAfterCompaction.get() - lastSeenSize.get());
      }
    });
  }

  @Test
  public void testBucketingFilesToSnapshots() throws Exception {
    // Create a table and set a quota
    TableName tn1 = helper.createTableWithRegions(1);
    admin.setQuota(QuotaSettingsFactory.limitTableSpace(
        tn1, SpaceQuotaHelperForTests.ONE_GIGABYTE, SpaceViolationPolicy.NO_INSERTS));

    // Write some data and flush it
    helper.writeData(tn1, 256L * SpaceQuotaHelperForTests.ONE_KILOBYTE);
    admin.flush(tn1);

    final AtomicReference<Long> lastSeenSize = new AtomicReference<>();
    // Wait for the Master chore to run to see the usage (with a fudge factor)
    TEST_UTIL.waitFor(30_000, new SpaceQuotaSnapshotPredicate(conn, tn1) {
      @Override
      boolean evaluate(SpaceQuotaSnapshot snapshot) throws Exception {
        lastSeenSize.set(snapshot.getUsage());
        return snapshot.getUsage() > 230L * SpaceQuotaHelperForTests.ONE_KILOBYTE;
      }
    });

    // Create a snapshot on the table
    final String snapshotName1 = tn1 + "snapshot1";
    admin.snapshot(new SnapshotDescription(snapshotName1, tn1, SnapshotType.SKIPFLUSH));
    // Major compact the table to force a rewrite
    TEST_UTIL.compact(tn1, true);

    // Make sure that the snapshot owns the size
    final Table quotaTable = conn.getTable(QuotaUtil.QUOTA_TABLE_NAME);
    TEST_UTIL.waitFor(30_000, new Predicate<Exception>() {
      @Override
      public boolean evaluate() throws Exception {
        Get g = QuotaTableUtil.makeGetForSnapshotSize(tn1, snapshotName1);
        Result r = quotaTable.get(g);
        if (r == null || r.isEmpty()) {
          return false;
        }
        r.advance();
        Cell c = r.current();
        // The compaction result file has an additional compaction event tracker
        return lastSeenSize.get() <= QuotaTableUtil.parseSnapshotSize(c);
      }
    });

    // Create another snapshot on the table
    final String snapshotName2 = tn1 + "snapshot2";
    admin.snapshot(new SnapshotDescription(snapshotName2, tn1, SnapshotType.SKIPFLUSH));
    // Major compact the table to force a rewrite
    TEST_UTIL.compact(tn1, true);

    // Make sure that the snapshot owns the size
    TEST_UTIL.waitFor(30_000, new Predicate<Exception>() {
      @Override
      public boolean evaluate() throws Exception {
        Get g = QuotaTableUtil.makeGetForSnapshotSize(tn1, snapshotName2);
        Result r = quotaTable.get(g);
        if (r == null || r.isEmpty()) {
          return false;
        }
        r.advance();
        Cell c = r.current();
        // The compaction result file has an additional compaction event tracker
        return lastSeenSize.get() <= QuotaTableUtil.parseSnapshotSize(c);
      }
    });

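    // The namespace-level snapshot size should cover both snapshots, i.e. at least double the
    // original table size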
    Get g = QuotaTableUtil.createGetNamespaceSnapshotSize(tn1.getNamespaceAsString());
    Result r = quotaTable.get(g);
    assertNotNull(r);
    assertFalse(r.isEmpty());
    r.advance();
    long size = QuotaTableUtil.parseSnapshotSize(r.current());
    assertTrue(lastSeenSize.get() * 2 <= size);
  }

  /**
   * Computes if {@code size2} is within {@code delta} of {@code size1}, inclusive.
   */
  boolean closeInSize(long size1, long size2, long delta) {
    long lower = size1 - delta;
    long upper = size1 + delta;
    return lower <= size2 && size2 <= upper;
  }
}