001/*
002 * Licensed to the Apache Software Foundation (ASF) under one
003 * or more contributor license agreements.  See the NOTICE file
004 * distributed with this work for additional information
005 * regarding copyright ownership.  The ASF licenses this file
006 * to you under the Apache License, Version 2.0 (the
007 * "License"); you may not use this file except in compliance
008 * with the License.  You may obtain a copy of the License at
009 *
010 *     http://www.apache.org/licenses/LICENSE-2.0
011 *
012 * Unless required by applicable law or agreed to in writing, software
013 * distributed under the License is distributed on an "AS IS" BASIS,
014 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
015 * See the License for the specific language governing permissions and
016 * limitations under the License.
017 */
018package org.apache.hadoop.hbase.backup;
019
020import static org.junit.jupiter.api.Assertions.assertEquals;
021import static org.junit.jupiter.api.Assertions.assertFalse;
022import static org.junit.jupiter.api.Assertions.assertNotNull;
023import static org.junit.jupiter.api.Assertions.assertThrows;
024import static org.junit.jupiter.api.Assertions.assertTrue;
025import static org.junit.jupiter.api.Assertions.fail;
026import static org.mockito.ArgumentMatchers.any;
027import static org.mockito.Mockito.mock;
028import static org.mockito.Mockito.never;
029import static org.mockito.Mockito.times;
030import static org.mockito.Mockito.verify;
031import static org.mockito.Mockito.when;
032
033import java.io.IOException;
034import java.security.PrivilegedExceptionAction;
035import java.util.ArrayList;
036import java.util.Collection;
037import java.util.Collections;
038import java.util.HashMap;
039import java.util.List;
040import java.util.Map;
041import java.util.Objects;
042import java.util.stream.Collectors;
043import org.apache.hadoop.conf.Configuration;
044import org.apache.hadoop.fs.FileStatus;
045import org.apache.hadoop.fs.FileSystem;
046import org.apache.hadoop.fs.Path;
047import org.apache.hadoop.fs.PathFilter;
048import org.apache.hadoop.hbase.ChoreService;
049import org.apache.hadoop.hbase.HBaseTestingUtil;
050import org.apache.hadoop.hbase.HConstants;
051import org.apache.hadoop.hbase.Stoppable;
052import org.apache.hadoop.hbase.TableName;
053import org.apache.hadoop.hbase.client.Admin;
054import org.apache.hadoop.hbase.client.RegionInfo;
055import org.apache.hadoop.hbase.client.Table;
056import org.apache.hadoop.hbase.master.HMaster;
057import org.apache.hadoop.hbase.master.cleaner.DirScanPool;
058import org.apache.hadoop.hbase.master.cleaner.HFileCleaner;
059import org.apache.hadoop.hbase.regionserver.ConstantSizeRegionSplitPolicy;
060import org.apache.hadoop.hbase.regionserver.HRegion;
061import org.apache.hadoop.hbase.regionserver.HRegionServer;
062import org.apache.hadoop.hbase.regionserver.HStoreFile;
063import org.apache.hadoop.hbase.testclassification.LargeTests;
064import org.apache.hadoop.hbase.testclassification.MiscTests;
065import org.apache.hadoop.hbase.util.Bytes;
066import org.apache.hadoop.hbase.util.CommonFSUtils;
067import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
068import org.apache.hadoop.hbase.util.FSUtils;
069import org.apache.hadoop.hbase.util.HFileArchiveTestingUtil;
070import org.apache.hadoop.hbase.util.HFileArchiveUtil;
071import org.apache.hadoop.hbase.util.StoppableImplementation;
072import org.apache.hadoop.security.UserGroupInformation;
073import org.junit.jupiter.api.AfterAll;
074import org.junit.jupiter.api.AfterEach;
075import org.junit.jupiter.api.BeforeAll;
076import org.junit.jupiter.api.Tag;
077import org.junit.jupiter.api.Test;
078import org.junit.jupiter.api.TestInfo;
079import org.mockito.ArgumentCaptor;
080import org.slf4j.Logger;
081import org.slf4j.LoggerFactory;
082
083/**
084 * Test that the {@link HFileArchiver} correctly removes all the parts of a region when cleaning up
085 * a region
086 */
087@Tag(LargeTests.TAG)
088@Tag(MiscTests.TAG)
089public class TestHFileArchiving {
090
091  private static final Logger LOG = LoggerFactory.getLogger(TestHFileArchiving.class);
092  private static final HBaseTestingUtil UTIL = new HBaseTestingUtil();
093  private static final byte[] TEST_FAM = Bytes.toBytes("fam");
094
095  private static DirScanPool POOL;
096
097  private String testMethodName(TestInfo testInfo) {
098    return testInfo.getTestMethod().get().getName();
099  }
100
101  /**
102   * Setup the config for the cluster
103   */
104  @BeforeAll
105  public static void setupCluster() throws Exception {
106    setupConf(UTIL.getConfiguration());
107    UTIL.startMiniCluster();
108
109    // We don't want the cleaner to remove files. The tests do that.
110    UTIL.getMiniHBaseCluster().getMaster().getHFileCleaner().cancel(true);
111
112    POOL = DirScanPool.getHFileCleanerScanPool(UTIL.getConfiguration());
113  }
114
115  private static void setupConf(Configuration conf) {
116    // disable the ui
117    conf.setInt("hbase.regionsever.info.port", -1);
118    // drop the memstore size so we get flushes
119    conf.setInt("hbase.hregion.memstore.flush.size", 25000);
120    // disable major compactions
121    conf.setInt(HConstants.MAJOR_COMPACTION_PERIOD, 0);
122
123    // prevent aggressive region split
124    conf.set(HConstants.HBASE_REGION_SPLIT_POLICY_KEY,
125      ConstantSizeRegionSplitPolicy.class.getName());
126  }
127
128  @AfterEach
129  public void tearDown() throws Exception {
130    // cleanup the archive directory
131    clearArchiveDirectory();
132  }
133
  @AfterAll
  public static void cleanupTest() throws Exception {
    // Stop the mini cluster first, then release the cleaner scan pool created in setupCluster().
    UTIL.shutdownMiniCluster();
    POOL.shutdownNow();
  }
139
140  @Test
141  public void testArchiveStoreFilesDifferentFileSystemsWallWithSchemaPlainRoot() throws Exception {
142    String walDir = "mockFS://mockFSAuthority:9876/mockDir/wals/";
143    String baseDir = CommonFSUtils.getRootDir(UTIL.getConfiguration()).toString() + "/";
144    testArchiveStoreFilesDifferentFileSystems(walDir, baseDir, HFileArchiver::archiveStoreFiles);
145  }
146
147  @Test
148  public void testArchiveStoreFilesDifferentFileSystemsWallNullPlainRoot() throws Exception {
149    String baseDir = CommonFSUtils.getRootDir(UTIL.getConfiguration()).toString() + "/";
150    testArchiveStoreFilesDifferentFileSystems(null, baseDir, HFileArchiver::archiveStoreFiles);
151  }
152
153  @Test
154  public void testArchiveStoreFilesDifferentFileSystemsWallAndRootSame() throws Exception {
155    String baseDir = CommonFSUtils.getRootDir(UTIL.getConfiguration()).toString() + "/";
156    testArchiveStoreFilesDifferentFileSystems("/hbase/wals/", baseDir,
157      HFileArchiver::archiveStoreFiles);
158  }
159
160  @Test
161  public void testArchiveStoreFilesDifferentFileSystemsFileAlreadyArchived() throws Exception {
162    String baseDir = CommonFSUtils.getRootDir(UTIL.getConfiguration()).toString() + "/";
163    testArchiveStoreFilesDifferentFileSystems("/hbase/wals/", baseDir, true, false, false,
164      HFileArchiver::archiveStoreFiles);
165  }
166
167  @Test
168  public void testArchiveStoreFilesDifferentFileSystemsArchiveFileMatchCurrent() throws Exception {
169    String baseDir = CommonFSUtils.getRootDir(UTIL.getConfiguration()).toString() + "/";
170    testArchiveStoreFilesDifferentFileSystems("/hbase/wals/", baseDir, true, true, false,
171      HFileArchiver::archiveStoreFiles);
172  }
173
174  @Test
175  public void testArchiveStoreFilesDifferentFileSystemsArchiveFileMismatch() throws Exception {
176    String baseDir = CommonFSUtils.getRootDir(UTIL.getConfiguration()).toString() + "/";
177    assertThrows(IOException.class, () -> testArchiveStoreFilesDifferentFileSystems("/hbase/wals/",
178      baseDir, true, true, true, HFileArchiver::archiveStoreFiles));
179  }
180
  /**
   * Convenience overload of the mocked-filesystem harness: source file exists, no archive file
   * pre-exists, and no length mismatch.
   */
  private void testArchiveStoreFilesDifferentFileSystems(String walDir, String expectedBase,
    ArchivingFunction<Configuration, FileSystem, RegionInfo, Path, byte[],
      Collection<HStoreFile>> archivingFunction)
    throws IOException {
    testArchiveStoreFilesDifferentFileSystems(walDir, expectedBase, false, true, false,
      archivingFunction);
  }
188
  /**
   * Drives an archiving function against a fully mocked filesystem and verifies the rename calls
   * it issues.
   * @param walDir                     WAL dir to configure, or null to leave it unset
   * @param expectedBase               base path the archived file is expected to land under
   * @param archiveFileExists          whether a file already exists at the archive location
   * @param sourceFileExists           whether the source store file exists
   * @param archiveFileDifferentLength whether the pre-existing archive file's length differs from
   *                                   the source's (a mismatch is expected to raise IOException)
   * @param archivingFunction          the HFileArchiver entry point under test
   */
  private void testArchiveStoreFilesDifferentFileSystems(String walDir, String expectedBase,
    boolean archiveFileExists, boolean sourceFileExists, boolean archiveFileDifferentLength,
    ArchivingFunction<Configuration, FileSystem, RegionInfo, Path, byte[],
      Collection<HStoreFile>> archivingFunction)
    throws IOException {
    FileSystem mockedFileSystem = mock(FileSystem.class);
    Configuration conf = new Configuration(UTIL.getConfiguration());
    if (walDir != null) {
      conf.set(CommonFSUtils.HBASE_WAL_DIR, walDir);
    }
    when(mockedFileSystem.getScheme()).thenReturn("mockFS");
    when(mockedFileSystem.mkdirs(any())).thenReturn(true);
    // exists() answers per-path from this tracker; any path not listed here reports true
    HashMap<Path, Boolean> existsTracker = new HashMap<>();
    Path filePath = new Path("/mockDir/wals/mockFile");
    String expectedDir = expectedBase
      + "archive/data/default/mockTable/mocked-region-encoded-name/testfamily/mockFile";
    existsTracker.put(new Path(expectedDir), archiveFileExists);
    existsTracker.put(filePath, sourceFileExists);
    when(mockedFileSystem.exists(any()))
      .thenAnswer(invocation -> existsTracker.getOrDefault((Path) invocation.getArgument(0), true));
    FileStatus mockedStatus = mock(FileStatus.class);
    // first getLen() call reports the source file, the second the archive file; the lengths
    // differ only when archiveFileDifferentLength is set
    when(mockedStatus.getLen()).thenReturn(12L).thenReturn(archiveFileDifferentLength ? 34L : 12L);
    when(mockedFileSystem.getFileStatus(any())).thenReturn(mockedStatus);
    RegionInfo mockedRegion = mock(RegionInfo.class);
    TableName tableName = TableName.valueOf("mockTable");
    when(mockedRegion.getTable()).thenReturn(tableName);
    when(mockedRegion.getEncodedName()).thenReturn("mocked-region-encoded-name");
    Path tableDir = new Path("mockFS://mockDir/tabledir");
    byte[] family = Bytes.toBytes("testfamily");
    HStoreFile mockedFile = mock(HStoreFile.class);
    List<HStoreFile> list = new ArrayList<>();
    list.add(mockedFile);
    when(mockedFile.getPath()).thenReturn(filePath);
    when(mockedFileSystem.rename(any(), any())).thenReturn(true);
    archivingFunction.apply(conf, mockedFileSystem, mockedRegion, tableDir, family, list);

    if (sourceFileExists) {
      ArgumentCaptor<Path> srcPath = ArgumentCaptor.forClass(Path.class);
      ArgumentCaptor<Path> destPath = ArgumentCaptor.forClass(Path.class);
      if (archiveFileExists) {
        // Verify we renamed the archived file to sideline, and then renamed the source file.
        verify(mockedFileSystem, times(2)).rename(srcPath.capture(), destPath.capture());
        assertEquals(expectedDir, srcPath.getAllValues().get(0).toString());
        assertEquals(filePath, srcPath.getAllValues().get(1));
        assertEquals(expectedDir, destPath.getAllValues().get(1).toString());
      } else {
        // Verify we renamed the source file to the archived file.
        verify(mockedFileSystem, times(1)).rename(srcPath.capture(), destPath.capture());
        assertEquals(filePath, srcPath.getAllValues().get(0));
        assertEquals(expectedDir, destPath.getAllValues().get(0).toString());
      }
    } else {
      if (archiveFileExists) {
        // Verify we did not rename. No source file with a present archive file should be a no-op.
        verify(mockedFileSystem, never()).rename(any(), any());
      } else {
        fail("Unsupported test conditions: sourceFileExists and archiveFileExists both false.");
      }
    }
  }
249
250  @FunctionalInterface
251  private interface ArchivingFunction<Configuration, FS, Region, Dir, Family, Files> {
252    void apply(Configuration config, FS fs, Region region, Dir dir, Family family, Files files)
253      throws IOException;
254  }
255
256  @Test
257  public void testArchiveRecoveredEditsWalDirNull() throws Exception {
258    testArchiveRecoveredEditsWalDirNullOrSame(null);
259  }
260
261  @Test
262  public void testArchiveRecoveredEditsWalDirSameFsStoreFiles() throws Exception {
263    testArchiveRecoveredEditsWalDirNullOrSame("/wal-dir");
264  }
265
266  private void testArchiveRecoveredEditsWalDirNullOrSame(String walDir) throws Exception {
267    String originalRootDir = UTIL.getConfiguration().get(HConstants.HBASE_DIR);
268    try {
269      String baseDir = "mockFS://mockFSAuthority:9876/hbase/";
270      UTIL.getConfiguration().set(HConstants.HBASE_DIR, baseDir);
271      testArchiveStoreFilesDifferentFileSystems(walDir, baseDir, (conf, fs, region, dir, family,
272        list) -> HFileArchiver.archiveRecoveredEdits(conf, fs, region, family, list));
273    } finally {
274      UTIL.getConfiguration().set(HConstants.HBASE_DIR, originalRootDir);
275    }
276  }
277
278  @Test
279  public void testArchiveRecoveredEditsWrongFS() throws Exception {
280    String baseDir = CommonFSUtils.getRootDir(UTIL.getConfiguration()).toString() + "/";
281    // Internally, testArchiveStoreFilesDifferentFileSystems will pass a "mockedFS"
282    // to HFileArchiver.archiveRecoveredEdits, but since wal-dir is supposedly on same FS
283    // as root dir it would lead to conflicting FSes and an IOException is expected.
284    assertThrows(IOException.class,
285      () -> testArchiveStoreFilesDifferentFileSystems("/wal-dir", baseDir, (conf, fs, region, dir,
286        family, list) -> HFileArchiver.archiveRecoveredEdits(conf, fs, region, family, list)));
287  }
288
289  @Test
290  public void testArchiveRecoveredEditsWalDirDifferentFS() throws Exception {
291    String walDir = "mockFS://mockFSAuthority:9876/mockDir/wals/";
292    testArchiveStoreFilesDifferentFileSystems(walDir, walDir, (conf, fs, region, dir, family,
293      list) -> HFileArchiver.archiveRecoveredEdits(conf, fs, region, family, list));
294  }
295
296  @Test
297  public void testRemoveRegionDirOnArchive(TestInfo testInfo) throws Exception {
298    final TableName tableName = TableName.valueOf(testMethodName(testInfo));
299    UTIL.createTable(tableName, TEST_FAM);
300
301    final Admin admin = UTIL.getAdmin();
302
303    // get the current store files for the region
304    List<HRegion> servingRegions = UTIL.getHBaseCluster().getRegions(tableName);
305    // make sure we only have 1 region serving this table
306    assertEquals(1, servingRegions.size());
307    HRegion region = servingRegions.get(0);
308
309    // and load the table
310    UTIL.loadRegion(region, TEST_FAM);
311
312    // shutdown the table so we can manipulate the files
313    admin.disableTable(tableName);
314
315    FileSystem fs = UTIL.getTestFileSystem();
316
317    // now attempt to depose the region
318    Path rootDir = region.getRegionFileSystem().getTableDir().getParent();
319    Path regionDir = FSUtils.getRegionDirFromRootDir(rootDir, region.getRegionInfo());
320
321    HFileArchiver.archiveRegion(UTIL.getConfiguration(), fs, region.getRegionInfo());
322
323    // check for the existence of the archive directory and some files in it
324    Path archiveDir = HFileArchiveTestingUtil.getRegionArchiveDir(UTIL.getConfiguration(), region);
325    assertTrue(fs.exists(archiveDir));
326
327    // check to make sure the store directory was copied
328    FileStatus[] stores = fs.listStatus(archiveDir, new PathFilter() {
329      @Override
330      public boolean accept(Path p) {
331        if (p.getName().contains(HConstants.RECOVERED_EDITS_DIR)) {
332          return false;
333        }
334        return true;
335      }
336    });
337    assertTrue(stores.length == 1);
338
339    // make sure we archived the store files
340    FileStatus[] storeFiles = fs.listStatus(stores[0].getPath());
341    assertTrue(storeFiles.length > 0);
342
343    // then ensure the region's directory isn't present
344    assertFalse(fs.exists(regionDir));
345
346    UTIL.deleteTable(tableName);
347  }
348
349  /**
350   * Test that the region directory is removed when we archive a region without store files, but
351   * still has hidden files.
352   * @throws IOException throws an IOException if there's problem creating a table or if there's an
353   *                     issue with accessing FileSystem.
354   */
355  @Test
356  public void testDeleteRegionWithNoStoreFiles(TestInfo testInfo) throws IOException {
357    final TableName tableName = TableName.valueOf(testMethodName(testInfo));
358    UTIL.createTable(tableName, TEST_FAM);
359
360    // get the current store files for the region
361    List<HRegion> servingRegions = UTIL.getHBaseCluster().getRegions(tableName);
362    // make sure we only have 1 region serving this table
363    assertEquals(1, servingRegions.size());
364    HRegion region = servingRegions.get(0);
365
366    FileSystem fs = region.getRegionFileSystem().getFileSystem();
367
368    // make sure there are some files in the regiondir
369    Path rootDir = CommonFSUtils.getRootDir(fs.getConf());
370    Path regionDir = FSUtils.getRegionDirFromRootDir(rootDir, region.getRegionInfo());
371    FileStatus[] regionFiles = CommonFSUtils.listStatus(fs, regionDir, null);
372    assertNotNull(regionFiles, "No files in the region directory");
373    if (LOG.isDebugEnabled()) {
374      List<Path> files = new ArrayList<>();
375      for (FileStatus file : regionFiles) {
376        files.add(file.getPath());
377      }
378      LOG.debug("Current files:" + files);
379    }
380    // delete the visible folders so we just have hidden files/folders
381    final PathFilter dirFilter = new FSUtils.DirFilter(fs);
382    PathFilter nonHidden = new PathFilter() {
383      @Override
384      public boolean accept(Path file) {
385        return dirFilter.accept(file) && !file.getName().startsWith(".");
386      }
387    };
388    FileStatus[] storeDirs = CommonFSUtils.listStatus(fs, regionDir, nonHidden);
389    for (FileStatus store : storeDirs) {
390      LOG.debug("Deleting store for test");
391      fs.delete(store.getPath(), true);
392    }
393
394    // then archive the region
395    HFileArchiver.archiveRegion(UTIL.getConfiguration(), fs, region.getRegionInfo());
396
397    // and check to make sure the region directoy got deleted
398    assertFalse(fs.exists(regionDir), "Region directory (" + regionDir + "), still exists.");
399
400    UTIL.deleteTable(tableName);
401  }
402
403  private List<HRegion> initTableForArchivingRegions(TableName tableName) throws IOException {
404    final byte[][] splitKeys =
405      new byte[][] { Bytes.toBytes("b"), Bytes.toBytes("c"), Bytes.toBytes("d") };
406
407    UTIL.createTable(tableName, TEST_FAM, splitKeys);
408
409    // get the current store files for the regions
410    List<HRegion> regions = UTIL.getHBaseCluster().getRegions(tableName);
411    // make sure we have 4 regions serving this table
412    assertEquals(4, regions.size());
413
414    // and load the table
415    try (Table table = UTIL.getConnection().getTable(tableName)) {
416      UTIL.loadTable(table, TEST_FAM);
417    }
418
419    // disable the table so that we can manipulate the files
420    UTIL.getAdmin().disableTable(tableName);
421
422    return regions;
423  }
424
425  @Test
426  public void testArchiveRegions(TestInfo testInfo) throws Exception {
427    final TableName tableName = TableName.valueOf(testMethodName(testInfo));
428    List<HRegion> regions = initTableForArchivingRegions(tableName);
429
430    FileSystem fs = UTIL.getTestFileSystem();
431
432    // now attempt to depose the regions
433    Path rootDir = CommonFSUtils.getRootDir(UTIL.getConfiguration());
434    Path tableDir = CommonFSUtils.getTableDir(rootDir, regions.get(0).getRegionInfo().getTable());
435    List<Path> regionDirList = regions.stream()
436      .map(region -> FSUtils.getRegionDirFromTableDir(tableDir, region.getRegionInfo()))
437      .collect(Collectors.toList());
438
439    HFileArchiver.archiveRegions(UTIL.getConfiguration(), fs, rootDir, tableDir, regionDirList);
440
441    // check for the existence of the archive directory and some files in it
442    for (HRegion region : regions) {
443      Path archiveDir =
444        HFileArchiveTestingUtil.getRegionArchiveDir(UTIL.getConfiguration(), region);
445      assertTrue(fs.exists(archiveDir));
446
447      // check to make sure the store directory was copied
448      FileStatus[] stores =
449        fs.listStatus(archiveDir, p -> !p.getName().contains(HConstants.RECOVERED_EDITS_DIR));
450      assertTrue(stores.length == 1);
451
452      // make sure we archived the store files
453      FileStatus[] storeFiles = fs.listStatus(stores[0].getPath());
454      assertTrue(storeFiles.length > 0);
455    }
456
457    // then ensure the region's directories aren't present
458    for (Path regionDir : regionDirList) {
459      assertFalse(fs.exists(regionDir));
460    }
461
462    UTIL.deleteTable(tableName);
463  }
464
  /**
   * Archiving as a user without filesystem permissions must surface a "Permission denied"
   * IOException rather than failing silently.
   */
  @Test
  public void testArchiveRegionsWhenPermissionDenied(TestInfo testInfo) throws Exception {
    final TableName tableName = TableName.valueOf(testMethodName(testInfo));
    List<HRegion> regions = initTableForArchivingRegions(tableName);

    // now attempt to depose the regions
    Path rootDir = CommonFSUtils.getRootDir(UTIL.getConfiguration());
    Path tableDir = CommonFSUtils.getTableDir(rootDir, regions.get(0).getRegionInfo().getTable());
    List<Path> regionDirList = regions.stream()
      .map(region -> FSUtils.getRegionDirFromTableDir(tableDir, region.getRegionInfo()))
      .collect(Collectors.toList());

    // To create a permission denied error, we do archive regions as a non-current user
    UserGroupInformation ugi =
      UserGroupInformation.createUserForTesting("foo1234", new String[] { "group1" });

    try {
      IOException e =
        assertThrows(IOException.class, () -> ugi.doAs((PrivilegedExceptionAction<Void>) () -> {
          // obtain the FS handle inside doAs so it belongs to the unprivileged user
          FileSystem fs = UTIL.getTestFileSystem();
          HFileArchiver.archiveRegions(UTIL.getConfiguration(), fs, rootDir, tableDir,
            regionDirList);
          return null;
        }));
      assertTrue(e.getCause().getMessage().contains("Permission denied"));
    } finally {
      UTIL.deleteTable(tableName);
    }
  }
494
495  @Test
496  public void testArchiveOnTableDelete(TestInfo testInfo) throws Exception {
497    final TableName tableName = TableName.valueOf(testMethodName(testInfo));
498    UTIL.createTable(tableName, TEST_FAM);
499
500    List<HRegion> servingRegions = UTIL.getHBaseCluster().getRegions(tableName);
501    // make sure we only have 1 region serving this table
502    assertEquals(1, servingRegions.size());
503    HRegion region = servingRegions.get(0);
504
505    // get the parent RS and monitor
506    HRegionServer hrs = UTIL.getRSForFirstRegionInTable(tableName);
507    FileSystem fs = hrs.getFileSystem();
508
509    // put some data on the region
510    LOG.debug("-------Loading table");
511    UTIL.loadRegion(region, TEST_FAM);
512
513    // get the hfiles in the region
514    List<HRegion> regions = hrs.getRegions(tableName);
515    assertEquals(1, regions.size(), "More that 1 region for test table.");
516
517    region = regions.get(0);
518    // wait for all the compactions to complete
519    region.waitForFlushesAndCompactions();
520
521    // disable table to prevent new updates
522    UTIL.getAdmin().disableTable(tableName);
523    LOG.debug("Disabled table");
524
525    // remove all the files from the archive to get a fair comparison
526    clearArchiveDirectory();
527
528    // then get the current store files
529    byte[][] columns = region.getTableDescriptor().getColumnFamilyNames().toArray(new byte[0][]);
530    List<String> storeFiles = region.getStoreFileList(columns);
531
532    // then delete the table so the hfiles get archived
533    UTIL.deleteTable(tableName);
534    LOG.debug("Deleted table");
535
536    assertArchiveFiles(fs, storeFiles, 30000);
537  }
538
539  private void assertArchiveFiles(FileSystem fs, List<String> storeFiles, long timeout)
540    throws IOException {
541    long end = EnvironmentEdgeManager.currentTime() + timeout;
542    Path archiveDir = HFileArchiveUtil.getArchivePath(UTIL.getConfiguration());
543    List<String> archivedFiles = new ArrayList<>();
544
545    // We have to ensure that the DeleteTableHandler is finished. HBaseAdmin.deleteXXX()
546    // can return before all files
547    // are archived. We should fix HBASE-5487 and fix synchronous operations from admin.
548    while (EnvironmentEdgeManager.currentTime() < end) {
549      archivedFiles = getAllFileNames(fs, archiveDir);
550      if (archivedFiles.size() >= storeFiles.size()) {
551        break;
552      }
553    }
554
555    Collections.sort(storeFiles);
556    Collections.sort(archivedFiles);
557
558    LOG.debug("Store files:");
559    for (int i = 0; i < storeFiles.size(); i++) {
560      LOG.debug(i + " - " + storeFiles.get(i));
561    }
562    LOG.debug("Archive files:");
563    for (int i = 0; i < archivedFiles.size(); i++) {
564      LOG.debug(i + " - " + archivedFiles.get(i));
565    }
566
567    assertTrue(archivedFiles.containsAll(storeFiles),
568      "Archived files are missing some of the store files!");
569  }
570
571  /**
572   * Test that the store files are archived when a column family is removed.
573   * @throws java.io.IOException            if there's a problem creating a table.
574   * @throws java.lang.InterruptedException problem getting a RegionServer.
575   */
576  @Test
577  public void testArchiveOnTableFamilyDelete(TestInfo testInfo)
578    throws IOException, InterruptedException {
579    final TableName tableName = TableName.valueOf(testMethodName(testInfo));
580    UTIL.createTable(tableName, new byte[][] { TEST_FAM, Bytes.toBytes("fam2") });
581
582    List<HRegion> servingRegions = UTIL.getHBaseCluster().getRegions(tableName);
583    // make sure we only have 1 region serving this table
584    assertEquals(1, servingRegions.size());
585    HRegion region = servingRegions.get(0);
586
587    // get the parent RS and monitor
588    HRegionServer hrs = UTIL.getRSForFirstRegionInTable(tableName);
589    FileSystem fs = hrs.getFileSystem();
590
591    // put some data on the region
592    LOG.debug("-------Loading table");
593    UTIL.loadRegion(region, TEST_FAM);
594
595    // get the hfiles in the region
596    List<HRegion> regions = hrs.getRegions(tableName);
597    assertEquals(1, regions.size(), "More that 1 region for test table.");
598
599    region = regions.get(0);
600    // wait for all the compactions to complete
601    region.waitForFlushesAndCompactions();
602
603    // disable table to prevent new updates
604    UTIL.getAdmin().disableTable(tableName);
605    LOG.debug("Disabled table");
606
607    // remove all the files from the archive to get a fair comparison
608    clearArchiveDirectory();
609
610    // then get the current store files
611    byte[][] columns = region.getTableDescriptor().getColumnFamilyNames().toArray(new byte[0][]);
612    List<String> storeFiles = region.getStoreFileList(columns);
613
614    // then delete the table so the hfiles get archived
615    UTIL.getAdmin().deleteColumnFamily(tableName, TEST_FAM);
616
617    assertArchiveFiles(fs, storeFiles, 30000);
618
619    UTIL.deleteTable(tableName);
620  }
621
622  /**
623   * Test HFileArchiver.resolveAndArchive() race condition HBASE-7643
624   */
625  @Test
626  public void testCleaningRace(TestInfo testInfo) throws Exception {
627    final long TEST_TIME = 20 * 1000;
628    final ChoreService choreService = new ChoreService("TEST_SERVER_NAME");
629
630    Configuration conf = UTIL.getMiniHBaseCluster().getMaster().getConfiguration();
631    Path rootDir = UTIL.getDataTestDirOnTestFS("testCleaningRace");
632    FileSystem fs = UTIL.getTestFileSystem();
633
634    Path archiveDir = new Path(rootDir, HConstants.HFILE_ARCHIVE_DIRECTORY);
635    Path regionDir = new Path(
636      CommonFSUtils.getTableDir(new Path("./"), TableName.valueOf(testMethodName(testInfo))),
637      "abcdef");
638    Path familyDir = new Path(regionDir, "cf");
639
640    Path sourceRegionDir = new Path(rootDir, regionDir);
641    fs.mkdirs(sourceRegionDir);
642
643    Stoppable stoppable = new StoppableImplementation();
644
645    // The cleaner should be looping without long pauses to reproduce the race condition.
646    HFileCleaner cleaner = getHFileCleaner(stoppable, conf, fs, archiveDir);
647    assertNotNull(cleaner, "cleaner should not be null");
648    try {
649      choreService.scheduleChore(cleaner);
650      // Keep creating/archiving new files while the cleaner is running in the other thread
651      long startTime = EnvironmentEdgeManager.currentTime();
652      for (long fid = 0; (EnvironmentEdgeManager.currentTime() - startTime) < TEST_TIME; ++fid) {
653        Path file = new Path(familyDir, String.valueOf(fid));
654        Path sourceFile = new Path(rootDir, file);
655        Path archiveFile = new Path(archiveDir, file);
656
657        fs.createNewFile(sourceFile);
658
659        try {
660          // Try to archive the file
661          HFileArchiver.archiveRegion(conf, fs, rootDir, sourceRegionDir.getParent(),
662            sourceRegionDir);
663
664          // The archiver succeded, the file is no longer in the original location
665          // but it's in the archive location.
666          LOG.debug("hfile=" + fid + " should be in the archive");
667          assertTrue(fs.exists(archiveFile));
668          assertFalse(fs.exists(sourceFile));
669        } catch (IOException e) {
670          // The archiver is unable to archive the file. Probably HBASE-7643 race condition.
671          // in this case, the file should not be archived, and we should have the file
672          // in the original location.
673          LOG.debug("hfile=" + fid + " should be in the source location");
674          assertFalse(fs.exists(archiveFile));
675          assertTrue(fs.exists(sourceFile));
676
677          // Avoid to have this file in the next run
678          fs.delete(sourceFile, false);
679        }
680      }
681    } finally {
682      stoppable.stop("test end");
683      cleaner.cancel(true);
684      choreService.shutdown();
685      fs.delete(rootDir, true);
686    }
687  }
688
689  @Test
690  public void testArchiveRegionTableAndRegionDirsNull() throws IOException {
691    Path rootDir = UTIL.getDataTestDirOnTestFS("testCleaningRace");
692    FileSystem fileSystem = UTIL.getTestFileSystem();
693    Configuration conf = UTIL.getMiniHBaseCluster().getMaster().getConfiguration();
694    // Try to archive the file but with null regionDir, can't delete sourceFile
695    assertFalse(HFileArchiver.archiveRegion(conf, fileSystem, rootDir, null, null));
696  }
697
698  @Test
699  public void testArchiveRegionWithTableDirNull(TestInfo testInfo) throws IOException {
700    Configuration conf = UTIL.getMiniHBaseCluster().getMaster().getConfiguration();
701    Path regionDir = new Path(
702      CommonFSUtils.getTableDir(new Path("./"), TableName.valueOf(testMethodName(testInfo))),
703      "xyzabc");
704    Path familyDir = new Path(regionDir, "rd");
705    Path rootDir = UTIL.getDataTestDirOnTestFS("testCleaningRace");
706    Path file = new Path(familyDir, "1");
707    Path sourceFile = new Path(rootDir, file);
708    FileSystem fileSystem = UTIL.getTestFileSystem();
709    fileSystem.createNewFile(sourceFile);
710    Path sourceRegionDir = new Path(rootDir, regionDir);
711    fileSystem.mkdirs(sourceRegionDir);
712    // Try to archive the file
713    assertFalse(HFileArchiver.archiveRegion(conf, fileSystem, rootDir, null, sourceRegionDir));
714    assertFalse(fileSystem.exists(sourceRegionDir));
715  }
716
717  @Test
718  public void testArchiveRegionWithRegionDirNull(TestInfo testInfo) throws IOException {
719    Configuration conf = UTIL.getMiniHBaseCluster().getMaster().getConfiguration();
720    Path regionDir = new Path(
721      CommonFSUtils.getTableDir(new Path("./"), TableName.valueOf(testMethodName(testInfo))),
722      "elgn4nf");
723    Path familyDir = new Path(regionDir, "rdar");
724    Path rootDir = UTIL.getDataTestDirOnTestFS("testCleaningRace");
725    Path file = new Path(familyDir, "2");
726    Path sourceFile = new Path(rootDir, file);
727    FileSystem fileSystem = UTIL.getTestFileSystem();
728    fileSystem.createNewFile(sourceFile);
729    Path sourceRegionDir = new Path(rootDir, regionDir);
730    fileSystem.mkdirs(sourceRegionDir);
731    // Try to archive the file but with null regionDir, can't delete sourceFile
732    assertFalse(
733      HFileArchiver.archiveRegion(conf, fileSystem, rootDir, sourceRegionDir.getParent(), null));
734    assertTrue(fileSystem.exists(sourceRegionDir));
735    fileSystem.delete(sourceRegionDir, true);
736  }
737
738  // Avoid passing a null master to CleanerChore, see HBASE-21175
739  private HFileCleaner getHFileCleaner(Stoppable stoppable, Configuration conf, FileSystem fs,
740    Path archiveDir) throws IOException {
741    Map<String, Object> params = new HashMap<>();
742    params.put(HMaster.MASTER, UTIL.getMiniHBaseCluster().getMaster());
743    HFileCleaner cleaner = new HFileCleaner(1, stoppable, conf, fs, archiveDir, POOL);
744    return Objects.requireNonNull(cleaner);
745  }
746
747  private void clearArchiveDirectory() throws IOException {
748    UTIL.getTestFileSystem()
749      .delete(new Path(UTIL.getDefaultRootDirPath(), HConstants.HFILE_ARCHIVE_DIRECTORY), true);
750  }
751
752  /**
753   * Get the names of all the files below the given directory
754   * @param fs         the file system to inspect
755   * @param archiveDir the directory in which to look
756   * @return a list of all files in the directory and sub-directories
757   * @throws java.io.IOException throws IOException in case FS is unavailable
758   */
759  private List<String> getAllFileNames(final FileSystem fs, Path archiveDir) throws IOException {
760    FileStatus[] files = CommonFSUtils.listStatus(fs, archiveDir, new PathFilter() {
761      @Override
762      public boolean accept(Path p) {
763        if (p.getName().contains(HConstants.RECOVERED_EDITS_DIR)) {
764          return false;
765        }
766        return true;
767      }
768    });
769    return recurseOnFiles(fs, files, new ArrayList<>());
770  }
771
772  /** Recursively lookup all the file names under the file[] array **/
773  private List<String> recurseOnFiles(FileSystem fs, FileStatus[] files, List<String> fileNames)
774    throws IOException {
775    if (files == null || files.length == 0) {
776      return fileNames;
777    }
778
779    for (FileStatus file : files) {
780      if (file.isDirectory()) {
781        recurseOnFiles(fs, CommonFSUtils.listStatus(fs, file.getPath(), null), fileNames);
782      } else {
783        fileNames.add(file.getPath().getName());
784      }
785    }
786    return fileNames;
787  }
788}