/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.backup;

import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertFalse;
import static org.junit.jupiter.api.Assertions.assertTrue;

import java.io.File;
import java.util.ArrayList;
import java.util.List;
import org.apache.hadoop.fs.LocatedFileStatus;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.RemoteIterator;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.backup.impl.BackupAdminImpl;
import org.apache.hadoop.hbase.backup.util.BackupUtils;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.regionserver.HRegion;
import org.apache.hadoop.hbase.testclassification.LargeTests;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.HFileArchiveUtil;
import org.junit.jupiter.api.Tag;
import org.junit.jupiter.api.Test;

import org.apache.hbase.thirdparty.com.google.common.collect.Lists;

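/**
 * Verifies that incremental backup and restore still succeed when a bulkloaded store file has
 * been moved to the archive directory (as a compaction would do) before the incremental backup
 * runs.
 */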
@Tag(LargeTests.TAG)
public class TestIncrementalBackupRestoreHandlesArchivedFiles
  extends IncrementalBackupRestoreTestBase {

  @Test
  public void testIncBackupRestoreHandlesArchivedFiles() throws Exception {
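    // Add a second column family so the bulkload below writes files for more than one family.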
    byte[] fam2 = Bytes.toBytes("f2");
    TableDescriptor newTable1Desc = TableDescriptorBuilder.newBuilder(table1Desc)
      .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(fam2).build()).build();
    TEST_UTIL.getAdmin().modifyTable(newTable1Desc);
    try (Connection conn = ConnectionFactory.createConnection(conf1);
      BackupAdminImpl admin = new BackupAdminImpl(conn)) {
      String backupTargetDir = TEST_UTIL.getDataTestDir("backupTarget").toString();
      BACKUP_ROOT_DIR = new File(backupTargetDir).toURI().toString();

      List<TableName> tables = Lists.newArrayList(table1);

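      // Seed the table and take a full backup as the baseline for the incremental backup.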
      insertIntoTable(conn, table1, famName, 3, 100);
      String fullBackupId = takeFullBackup(tables, admin, true);
      assertTrue(checkSucceeded(fullBackupId));

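      // Write additional rows that only the incremental backup will capture.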
      insertIntoTable(conn, table1, famName, 4, 100);

      HRegion regionToBulkload = TEST_UTIL.getHBaseCluster().getRegions(table1).get(0);
      String regionName = regionToBulkload.getRegionInfo().getEncodedName();
      // A multi-family bulkload is required here to verify that multi-file bulkloads
      // are handled correctly.
      Path regionDir = doBulkload(table1, regionName, famName, fam2);

      // Simulate compaction/cleanup by moving one of the bulkloaded files into the
      // store's archive directory.
      Path archiveDir =
        HFileArchiveUtil.getStoreArchivePath(conf1, table1, regionName, Bytes.toString(famName));
      TEST_UTIL.getTestFileSystem().mkdirs(archiveDir);
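      // Bulkloaded HFiles carry a "_SeqId_" marker in their file names; collect all of them.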
      RemoteIterator<LocatedFileStatus> iter =
        TEST_UTIL.getTestFileSystem().listFiles(regionDir, true);
      List<Path> paths = new ArrayList<>();
      while (iter.hasNext()) {
        Path path = iter.next().getPath();
        if (path.toString().contains("_SeqId_")) {
          paths.add(path);
        }
      }
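      // The multi-family bulkload should have produced more than one file.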
      assertTrue(paths.size() > 1);
      Path path = paths.get(0);
      Path archiveFile = new Path(archiveDir, path.getName());
      // archive one of the files
      boolean success = TEST_UTIL.getTestFileSystem().rename(path, archiveFile);
      assertTrue(success);
      assertTrue(TEST_UTIL.getTestFileSystem().exists(archiveFile));
      assertFalse(TEST_UTIL.getTestFileSystem().exists(path));

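      // The incremental backup must resolve the bulkloaded file from the archive location
      // rather than fail on the now-missing path in the region directory.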
      BackupRequest request =
        createBackupRequest(BackupType.INCREMENTAL, tables, BACKUP_ROOT_DIR, true);
      String incrementalBackupId = admin.backupTables(request);
      assertTrue(checkSucceeded(incrementalBackupId));

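      // Restore the incremental backup into a separate table.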
      TableName[] fromTable = new TableName[] { table1 };
      TableName[] toTable = new TableName[] { table1_restore };

      admin.restore(BackupUtils.createRestoreRequest(BACKUP_ROOT_DIR, incrementalBackupId, false,
        fromTable, toTable, true));

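      // The restored table must contain every row, including the bulkloaded data from the
      // archived file.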
      int actualRowCount = TEST_UTIL.countRows(table1_restore);
      int expectedRowCount = TEST_UTIL.countRows(table1);
      assertEquals(expectedRowCount, actualRowCount);
    }
  }
}