/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.master.cleaner;

import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;

import java.io.IOException;
import java.util.Collection;
import java.util.List;
import java.util.Set;
import java.util.concurrent.Future;
import java.util.concurrent.TimeUnit;
import java.util.regex.Pattern;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseClassTestRule;
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.Waiter;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.SnapshotType;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.master.HMaster;
import org.apache.hadoop.hbase.master.snapshot.DisabledTableSnapshotHandler;
import org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner;
import org.apache.hadoop.hbase.master.snapshot.SnapshotManager;
import org.apache.hadoop.hbase.regionserver.CompactedHFilesDischarger;
import org.apache.hadoop.hbase.regionserver.ConstantSizeRegionSplitPolicy;
import org.apache.hadoop.hbase.regionserver.HRegion;
import org.apache.hadoop.hbase.regionserver.HRegionServer;
import org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils;
import org.apache.hadoop.hbase.snapshot.SnapshotReferenceUtil;
import org.apache.hadoop.hbase.snapshot.SnapshotTestingUtils;
import org.apache.hadoop.hbase.snapshot.UnknownSnapshotException;
import org.apache.hadoop.hbase.testclassification.MasterTests;
import org.apache.hadoop.hbase.testclassification.MediumTests;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.CommonFSUtils;
import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
import org.apache.hadoop.hbase.util.JVMClusterUtil.RegionServerThread;
import org.junit.After;
import org.junit.AfterClass;
import org.junit.Assert;
import org.junit.Before;
import org.junit.BeforeClass;
import org.junit.ClassRule;
import org.junit.Test;
import org.junit.experimental.categories.Category;
import org.mockito.Mockito;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import org.apache.hbase.thirdparty.com.google.common.collect.Lists;
import org.apache.hbase.thirdparty.com.google.common.util.concurrent.Uninterruptibles;

import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteSnapshotRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetCompletedSnapshotsRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetCompletedSnapshotsResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos
    .IsSnapshotCleanupEnabledRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos
    .IsSnapshotCleanupEnabledResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsSnapshotDoneRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsSnapshotDoneResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos
    .SetSnapshotCleanupRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos.SnapshotDescription;

/**
 * Test the master-related aspects of a snapshot
 */
@Category({MasterTests.class, MediumTests.class})
public class TestSnapshotFromMaster {

  @ClassRule
  public static final HBaseClassTestRule CLASS_RULE =
      HBaseClassTestRule.forClass(TestSnapshotFromMaster.class);

  private static final Logger LOG = LoggerFactory.getLogger(TestSnapshotFromMaster.class);
  private static final HBaseTestingUtility UTIL = new HBaseTestingUtility();
  private static final int NUM_RS = 2;
  private static Path rootDir;
  private static FileSystem fs;
  private static HMaster master;

  // for hfile archiving test.
  private static Path archiveDir;
  private static final byte[] TEST_FAM = Bytes.toBytes("fam");
  private static final TableName TABLE_NAME = TableName.valueOf("test");
  // refresh the cache every 1/2 second
  private static final long cacheRefreshPeriod = 500;
  private static final int blockingStoreFiles = 12;
  /**
   * Set up the config for the cluster
   */
  @BeforeClass
  public static void setupCluster() throws Exception {
    setupConf(UTIL.getConfiguration());
    UTIL.startMiniCluster(NUM_RS);
    fs = UTIL.getDFSCluster().getFileSystem();
    master = UTIL.getMiniHBaseCluster().getMaster();
    rootDir = master.getMasterFileSystem().getRootDir();
    archiveDir = new Path(rootDir, HConstants.HFILE_ARCHIVE_DIRECTORY);
  }

  private static void setupConf(Configuration conf) {
    // disable the ui
    conf.setInt("hbase.regionserver.info.port", -1);
    // change the flush size to a small amount, regulating number of store files
    conf.setInt("hbase.hregion.memstore.flush.size", 25000);
    // make sure we get a compaction when doing a load, but keep around some
    // files in the store
    conf.setInt("hbase.hstore.compaction.min", 2);
    conf.setInt("hbase.hstore.compactionThreshold", 5);
    // block writes if we get to 12 store files
    conf.setInt("hbase.hstore.blockingStoreFiles", blockingStoreFiles);
    // Ensure no extra cleaners are on by default (e.g. TimeToLiveHFileCleaner)
    conf.set(HFileCleaner.MASTER_HFILE_CLEANER_PLUGINS, "");
    conf.set(HConstants.HBASE_MASTER_LOGCLEANER_PLUGINS, "");
    // Enable snapshot
    conf.setBoolean(SnapshotManager.HBASE_SNAPSHOT_ENABLED, true);
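    // clean up finished snapshot sentinels after 3 seconds so they don't linger in the manager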
    conf.setLong(SnapshotManager.HBASE_SNAPSHOT_SENTINELS_CLEANUP_TIMEOUT_MILLIS, 3 * 1000L);
    conf.setLong(SnapshotHFileCleaner.HFILE_CACHE_REFRESH_PERIOD_CONF_KEY, cacheRefreshPeriod);
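    // use a constant-size split policy so regions don't split while the tests load data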
    conf.set(HConstants.HBASE_REGION_SPLIT_POLICY_KEY,
      ConstantSizeRegionSplitPolicy.class.getName());
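    // shorten the cleaner chore intervals so cleanup happens quickly during the tests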
    conf.setInt("hbase.hfile.compactions.cleaner.interval", 20 * 1000);
    conf.setInt("hbase.master.cleaner.snapshot.interval", 500);
  }

  @Before
  public void setup() throws Exception {
    UTIL.createTable(TABLE_NAME, TEST_FAM);
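    // clear out any snapshot handler left over from a previous test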
    master.getSnapshotManager().setSnapshotHandlerForTesting(TABLE_NAME, null);
  }

  @After
  public void tearDown() throws Exception {
    UTIL.deleteTable(TABLE_NAME);
    SnapshotTestingUtils.deleteAllSnapshots(UTIL.getAdmin());
    SnapshotTestingUtils.deleteArchiveDirectory(UTIL);
  }

  @AfterClass
  public static void cleanupTest() throws Exception {
    try {
      UTIL.shutdownMiniCluster();
    } catch (Exception e) {
      // NOOP;
    }
  }

  /**
   * Test that the contract from the master for checking on a snapshot is valid.
   * <p>
   * <ol>
   * <li>If a snapshot fails with an error, we expect to get the source error.</li>
   * <li>If there is no snapshot name supplied, we should get an error.</li>
   * <li>If asking about a snapshot that hasn't occurred, you should get an error.</li>
   * </ol>
   */
  @Test
  public void testIsDoneContract() throws Exception {

    IsSnapshotDoneRequest.Builder builder = IsSnapshotDoneRequest.newBuilder();

    String snapshotName = "asyncExpectedFailureTest";

    // check that we get an exception when looking up a snapshot where one hasn't happened
    SnapshotTestingUtils.expectSnapshotDoneException(master, builder.build(),
      UnknownSnapshotException.class);

    // and that we get the same issue, even if we specify a name
    SnapshotDescription desc = SnapshotDescription.newBuilder()
      .setName(snapshotName).setTable(TABLE_NAME.getNameAsString()).build();
    builder.setSnapshot(desc);
    SnapshotTestingUtils.expectSnapshotDoneException(master, builder.build(),
      UnknownSnapshotException.class);

    // set a mock handler to simulate a snapshot
    DisabledTableSnapshotHandler mockHandler = Mockito.mock(DisabledTableSnapshotHandler.class);
    Mockito.when(mockHandler.getException()).thenReturn(null);
    Mockito.when(mockHandler.getSnapshot()).thenReturn(desc);
    Mockito.when(mockHandler.isFinished()).thenReturn(Boolean.TRUE);
    Mockito.when(mockHandler.getCompletionTimestamp())
      .thenReturn(EnvironmentEdgeManager.currentTime());

    master.getSnapshotManager().setSnapshotHandlerForTesting(TABLE_NAME, mockHandler);

    // if we do a lookup without a snapshot name, we should fail - you should always know your name
    builder = IsSnapshotDoneRequest.newBuilder();
    SnapshotTestingUtils.expectSnapshotDoneException(master, builder.build(),
      UnknownSnapshotException.class);

    // then do the lookup for the snapshot and verify that it is done
    builder.setSnapshot(desc);
    IsSnapshotDoneResponse response =
      master.getMasterRpcServices().isSnapshotDone(null, builder.build());
    assertTrue("Snapshot didn't complete when it should have.", response.getDone());

    // now try the case where we are looking for a snapshot we didn't take
    builder.setSnapshot(SnapshotDescription.newBuilder().setName("Not A Snapshot").build());
    SnapshotTestingUtils.expectSnapshotDoneException(master, builder.build(),
      UnknownSnapshotException.class);

    // then create a snapshot on the fs and make sure that we can find it when checking done
    snapshotName = "completed";
    desc = createSnapshot(snapshotName);

    builder.setSnapshot(desc);
    response = master.getMasterRpcServices().isSnapshotDone(null, builder.build());
    assertTrue("Completed, on-disk snapshot not found", response.getDone());
  }

  @Test
  public void testGetCompletedSnapshots() throws Exception {
    // first check when there are no snapshots
    GetCompletedSnapshotsRequest request = GetCompletedSnapshotsRequest.newBuilder().build();
    GetCompletedSnapshotsResponse response =
      master.getMasterRpcServices().getCompletedSnapshots(null, request);
    assertEquals("Found unexpected number of snapshots", 0, response.getSnapshotsCount());

    // write one snapshot to the fs
    String snapshotName = "completed";
    SnapshotDescription snapshot = createSnapshot(snapshotName);

    // check that we get one snapshot
    response = master.getMasterRpcServices().getCompletedSnapshots(null, request);
    assertEquals("Found unexpected number of snapshots", 1, response.getSnapshotsCount());
    List<SnapshotDescription> snapshots = response.getSnapshotsList();
    List<SnapshotDescription> expected = Lists.newArrayList(snapshot);
    assertEquals("Returned snapshots don't match created snapshots", expected, snapshots);

    // write a second snapshot
    snapshotName = "completed_two";
    snapshot = createSnapshot(snapshotName);
    expected.add(snapshot);

    // check that we now get both snapshots
    response = master.getMasterRpcServices().getCompletedSnapshots(null, request);
    assertEquals("Found unexpected number of snapshots", 2, response.getSnapshotsCount());
    snapshots = response.getSnapshotsList();
    assertEquals("Returned snapshots don't match created snapshots", expected, snapshots);
  }

  @Test
  public void testDeleteSnapshot() throws Exception {

    String snapshotName = "completed";
    SnapshotDescription snapshot = SnapshotDescription.newBuilder().setName(snapshotName).build();

    DeleteSnapshotRequest request = DeleteSnapshotRequest.newBuilder().setSnapshot(snapshot)
        .build();
    try {
      master.getMasterRpcServices().deleteSnapshot(null, request);
      fail("Master didn't throw exception when attempting to delete snapshot that doesn't exist");
    } catch (org.apache.hbase.thirdparty.com.google.protobuf.ServiceException e) {
      // Expected
    }

    // write one snapshot to the fs
    createSnapshot(snapshotName);

    // then delete the existing snapshot, which shouldn't cause an exception to be thrown
    master.getMasterRpcServices().deleteSnapshot(null, request);
  }

  @Test
  public void testGetCompletedSnapshotsWithCleanup() throws Exception {
    // Enable auto snapshot cleanup for the cluster
    SetSnapshotCleanupRequest setSnapshotCleanupRequest =
        SetSnapshotCleanupRequest.newBuilder().setEnabled(true).build();
    master.getMasterRpcServices().switchSnapshotCleanup(null, setSnapshotCleanupRequest);

    // first check when there are no snapshots
    GetCompletedSnapshotsRequest request = GetCompletedSnapshotsRequest.newBuilder().build();
    GetCompletedSnapshotsResponse response =
        master.getMasterRpcServices().getCompletedSnapshots(null, request);
    assertEquals("Found unexpected number of snapshots", 0, response.getSnapshotsCount());

    // NOTE: This is timing-based and may be flaky. For now the timings are coarse, so the test
    // is more likely to pass, though we have to hang around longer.

    // write two snapshots with different TTLs to the fs
    createSnapshotWithTtl("snapshot_01", 5L);
    createSnapshotWithTtl("snapshot_02", 100L);

    // check that we get both snapshots
    response = master.getMasterRpcServices().getCompletedSnapshots(null, request);
    assertEquals("Found unexpected number of snapshots", 2, response.getSnapshotsCount());

    // Check that the snapshot with the 5-second TTL is auto cleaned once it expires. Wait 10
    // seconds just in case.
    Uninterruptibles.sleepUninterruptibly(10, TimeUnit.SECONDS);
    response = master.getMasterRpcServices().getCompletedSnapshots(null, request);
    assertEquals("Found unexpected number of snapshots", 1, response.getSnapshotsCount());
  }

  @Test
  public void testGetCompletedSnapshotsWithoutCleanup() throws Exception {
    // Disable auto snapshot cleanup for the cluster
    SetSnapshotCleanupRequest setSnapshotCleanupRequest =
        SetSnapshotCleanupRequest.newBuilder().setEnabled(false).build();
    master.getMasterRpcServices().switchSnapshotCleanup(null, setSnapshotCleanupRequest);

    // first check when there are no snapshots
    GetCompletedSnapshotsRequest request = GetCompletedSnapshotsRequest.newBuilder().build();
    GetCompletedSnapshotsResponse response =
        master.getMasterRpcServices().getCompletedSnapshots(null, request);
    assertEquals("Found unexpected number of snapshots", 0, response.getSnapshotsCount());

    // write two snapshots to the fs
    createSnapshotWithTtl("snapshot_02", 1L);
    createSnapshotWithTtl("snapshot_03", 1L);

    // check that we get both snapshots
    response = master.getMasterRpcServices().getCompletedSnapshots(null, request);
    assertEquals("Found unexpected number of snapshots", 2, response.getSnapshotsCount());

    // check that no snapshot is auto cleaned even after the 1-second TTLs have expired
    Uninterruptibles.sleepUninterruptibly(2, TimeUnit.SECONDS);
    response = master.getMasterRpcServices().getCompletedSnapshots(null, request);
    assertEquals("Found unexpected number of snapshots", 2, response.getSnapshotsCount());
  }

  @Test
  public void testSnapshotCleanupStatus() throws Exception {
    // Enable auto snapshot cleanup for the cluster
    SetSnapshotCleanupRequest setSnapshotCleanupRequest =
        SetSnapshotCleanupRequest.newBuilder().setEnabled(true).build();
    master.getMasterRpcServices().switchSnapshotCleanup(null, setSnapshotCleanupRequest);

    // Check if auto snapshot cleanup is enabled
    IsSnapshotCleanupEnabledRequest isSnapshotCleanupEnabledRequest =
        IsSnapshotCleanupEnabledRequest.newBuilder().build();
    IsSnapshotCleanupEnabledResponse isSnapshotCleanupEnabledResponse =
        master.getMasterRpcServices().isSnapshotCleanupEnabled(null,
            isSnapshotCleanupEnabledRequest);
    Assert.assertTrue(isSnapshotCleanupEnabledResponse.getEnabled());

    // Disable auto snapshot cleanup for the cluster
    setSnapshotCleanupRequest = SetSnapshotCleanupRequest.newBuilder()
        .setEnabled(false).build();
    master.getMasterRpcServices().switchSnapshotCleanup(null, setSnapshotCleanupRequest);

    // Check if auto snapshot cleanup is disabled
    isSnapshotCleanupEnabledRequest = IsSnapshotCleanupEnabledRequest
        .newBuilder().build();
    isSnapshotCleanupEnabledResponse =
        master.getMasterRpcServices().isSnapshotCleanupEnabled(null,
            isSnapshotCleanupEnabledRequest);
    Assert.assertFalse(isSnapshotCleanupEnabledResponse.getEnabled());
  }

  /**
   * Test that the snapshot hfile archive cleaner works correctly. HFiles that are in snapshots
   * should be retained, while those that are not in a snapshot should be deleted.
   * @throws Exception on failure
   */
  @Test
  public void testSnapshotHFileArchiving() throws Exception {
    Admin admin = UTIL.getAdmin();
    // make sure we don't fail on listing snapshots
    SnapshotTestingUtils.assertNoSnapshots(admin);

    // recreate the test table with compactions disabled; otherwise a compaction may happen
    // before the snapshot, in which case the compaction after the snapshot would be a no-op
    // and the checks would fail
    UTIL.deleteTable(TABLE_NAME);
    TableDescriptor td = TableDescriptorBuilder.newBuilder(TABLE_NAME)
            .setColumnFamily(ColumnFamilyDescriptorBuilder.of(TEST_FAM))
            .setCompactionEnabled(false)
            .build();
    UTIL.getAdmin().createTable(td);

    // load and flush the table several times so each region ends up with multiple store files
    for (int i = 0; i < blockingStoreFiles / 2; i++) {
      UTIL.loadTable(UTIL.getConnection().getTable(TABLE_NAME), TEST_FAM);
      UTIL.flush(TABLE_NAME);
    }

    // disable the table so we can take a snapshot
    admin.disableTable(TABLE_NAME);

    // take a snapshot of the table
    String snapshotName = "snapshot";
    byte[] snapshotNameBytes = Bytes.toBytes(snapshotName);
    admin.snapshot(snapshotName, TABLE_NAME);

    LOG.info("After snapshot File-System state");
    CommonFSUtils.logFileSystemState(fs, rootDir, LOG);

    // ensure we only have one snapshot
    SnapshotTestingUtils.assertOneSnapshotThatMatches(admin, snapshotNameBytes, TABLE_NAME);

    td = TableDescriptorBuilder.newBuilder(td)
            .setCompactionEnabled(true)
            .build();
    // enable compactions now
    admin.modifyTable(td);

    // re-enable the table so we can compact the regions
    admin.enableTable(TABLE_NAME);

    // compact the files so we get some archived files for the table we just snapshotted
    List<HRegion> regions = UTIL.getHBaseCluster().getRegions(TABLE_NAME);
    for (HRegion region : regions) {
      region.waitForFlushesAndCompactions(); // enable can trigger a compaction, wait for it.
      region.compactStores(); // min is 2 so will compact and archive
    }
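    // find a region server that hosts at least one region of the test table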
    List<RegionServerThread> regionServerThreads = UTIL.getMiniHBaseCluster()
        .getRegionServerThreads();
    HRegionServer hrs = null;
    for (RegionServerThread rs : regionServerThreads) {
      if (!rs.getRegionServer().getRegions(TABLE_NAME).isEmpty()) {
        hrs = rs.getRegionServer();
        break;
      }
    }
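    // discharge the compacted-away store files so they get moved to the archive directory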
    CompactedHFilesDischarger cleaner = new CompactedHFilesDischarger(100, null, hrs, false);
    cleaner.chore();
    LOG.info("After compaction File-System state");
    CommonFSUtils.logFileSystemState(fs, rootDir, LOG);

    // make sure the cleaner has run
    LOG.debug("Running hfile cleaners");
    ensureHFileCleanersRun();
    LOG.info("After cleaners File-System state: " + rootDir);
    CommonFSUtils.logFileSystemState(fs, rootDir, LOG);

    // get the snapshot files for the table
    Path snapshotTable = SnapshotDescriptionUtils.getCompletedSnapshotDir(snapshotName, rootDir);
    Set<String> snapshotHFiles = SnapshotReferenceUtil.getHFileNames(
        UTIL.getConfiguration(), fs, snapshotTable);
    // check that the files in the archive contain the ones that we need for the snapshot
    LOG.debug("Have snapshot hfiles:");
    for (String fileName : snapshotHFiles) {
      LOG.debug(fileName);
    }
    // get the archived files for the table
    Collection<String> archives = getHFiles(archiveDir, fs, TABLE_NAME);

    // get the hfiles for the table
    Collection<String> hfiles = getHFiles(rootDir, fs, TABLE_NAME);

    // and make sure every snapshot hfile is accounted for, either in the archive or the table dir
    for (String fileName : snapshotHFiles) {
      boolean exist = archives.contains(fileName) || hfiles.contains(fileName);
      assertTrue("Archived hfiles " + archives + " and table hfiles " + hfiles
        + " are missing snapshot file: " + fileName, exist);
    }

    // delete the existing snapshot
    admin.deleteSnapshot(snapshotNameBytes);
    SnapshotTestingUtils.assertNoSnapshots(admin);

    // make sure that we don't keep around the hfiles that aren't in a snapshot
    // make sure we wait long enough to refresh the snapshot hfile cache
    List<BaseHFileCleanerDelegate> delegates = UTIL.getMiniHBaseCluster().getMaster()
        .getHFileCleaner().cleanersChain;
    for (BaseHFileCleanerDelegate delegate : delegates) {
      if (delegate instanceof SnapshotHFileCleaner) {
        ((SnapshotHFileCleaner) delegate).getFileCacheForTesting().triggerCacheRefreshForTesting();
      }
    }
    // run the cleaner again
    LOG.debug("Running hfile cleaners");
    ensureHFileCleanersRun();
    LOG.info("After delete snapshot cleaners run File-System state");
    CommonFSUtils.logFileSystemState(fs, rootDir, LOG);

    archives = getHFiles(archiveDir, fs, TABLE_NAME);
    assertEquals("Still have some hfiles in the archive even though their snapshot has been"
      + " deleted.", 0, archives.size());
  }

  /**
   * @return the names of all the HFiles for a given table in the specified dir
   * @throws IOException if the filesystem cannot be listed
   */
  private Collection<String> getHFiles(Path dir, FileSystem fs, TableName tableName)
      throws IOException {
    Path tableDir = CommonFSUtils.getTableDir(dir, tableName);
    return SnapshotTestingUtils.listHFileNames(fs, tableDir);
  }

  /**
   * Make sure the {@link HFileCleaner HFileCleaners} run at least once
   */
  private static void ensureHFileCleanersRun() {
    UTIL.getHBaseCluster().getMaster().getHFileCleaner().chore();
  }

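  /**
   * Write a mocked v2 snapshot for the test table directly to the filesystem, bypassing the
   * normal snapshot procedure.
   */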
  private SnapshotDescription createSnapshot(final String snapshotName) throws IOException {
    SnapshotTestingUtils.SnapshotMock snapshotMock =
      new SnapshotTestingUtils.SnapshotMock(UTIL.getConfiguration(), fs, rootDir);
    SnapshotTestingUtils.SnapshotMock.SnapshotBuilder builder =
      snapshotMock.createSnapshotV2(snapshotName, "test", 0);
    builder.commit();
    return builder.getSnapshotDescription();
  }

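  /** Same as {@link #createSnapshot(String)}, but also sets a TTL (in seconds) on the snapshot. */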
  private SnapshotDescription createSnapshotWithTtl(final String snapshotName, final long ttl)
      throws IOException {
    SnapshotTestingUtils.SnapshotMock snapshotMock =
        new SnapshotTestingUtils.SnapshotMock(UTIL.getConfiguration(), fs, rootDir);
    SnapshotTestingUtils.SnapshotMock.SnapshotBuilder builder =
        snapshotMock.createSnapshotV2(snapshotName, "test", 0, ttl);
    builder.commit();
    return builder.getSnapshotDescription();
  }

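  /**
   * Take a snapshot asynchronously and verify that the snapshot sentinel is eventually cleaned
   * up, i.e. the async snapshot does not leave the SnapshotManager reporting an in-flight
   * snapshot that would block the SnapshotHFileCleaner.
   */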
  @Test
  public void testAsyncSnapshotWillNotBlockSnapshotHFileCleaner() throws Exception {
    // Write some data
    Table table = UTIL.getConnection().getTable(TABLE_NAME);
    for (int i = 0; i < 10; i++) {
      Put put = new Put(Bytes.toBytes(i)).addColumn(TEST_FAM, Bytes.toBytes("q"), Bytes.toBytes(i));
      table.put(put);
    }
    String snapshotName = "testAsyncSnapshotWillNotBlockSnapshotHFileCleaner01";
    Future<Void> future =
      UTIL.getAdmin().snapshotAsync(new org.apache.hadoop.hbase.client.SnapshotDescription(
        snapshotName, TABLE_NAME, SnapshotType.FLUSH));
    Waiter.waitFor(UTIL.getConfiguration(), 10 * 1000L, 200L,
      () -> UTIL.getAdmin().listSnapshots(Pattern.compile(snapshotName)).size() == 1);
    UTIL.waitFor(30000, () -> !master.getSnapshotManager().isTakingAnySnapshot());
  }
}