001/*
002 * Licensed to the Apache Software Foundation (ASF) under one
003 * or more contributor license agreements.  See the NOTICE file
004 * distributed with this work for additional information
005 * regarding copyright ownership.  The ASF licenses this file
006 * to you under the Apache License, Version 2.0 (the
007 * "License"); you may not use this file except in compliance
008 * with the License.  You may obtain a copy of the License at
009 *
010 *     http://www.apache.org/licenses/LICENSE-2.0
011 *
012 * Unless required by applicable law or agreed to in writing, software
013 * distributed under the License is distributed on an "AS IS" BASIS,
014 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
015 * See the License for the specific language governing permissions and
016 * limitations under the License.
017 */
018package org.apache.hadoop.hbase.backup;
019
import static org.junit.jupiter.api.Assertions.assertFalse;
import static org.junit.jupiter.api.Assertions.assertTrue;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseTestingUtil;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.backup.impl.BackupAdminImpl;
import org.apache.hadoop.hbase.backup.mapreduce.MapReduceHFileSplitterJob;
import org.apache.hadoop.hbase.backup.util.BackupUtils;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.testclassification.LargeTests;
import org.junit.jupiter.api.BeforeAll;
import org.junit.jupiter.api.Tag;
import org.junit.jupiter.api.Test;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
040
041@Tag(LargeTests.TAG)
042public class TestRemoteRestore extends TestBackupBase {
043
044  private static final Logger LOG = LoggerFactory.getLogger(TestRemoteRestore.class);
045
046  /**
047   * Setup Cluster with appropriate configurations before running tests.
048   * @throws Exception if starting the mini cluster or setting up the tables fails
049   */
050  @BeforeAll
051  public static void setUp() throws Exception {
052    TEST_UTIL = new HBaseTestingUtil();
053    conf1 = TEST_UTIL.getConfiguration();
054    useSecondCluster = true;
055    setUpHelper();
056  }
057
058  /**
059   * Verify that a remote restore on a single table is successful.
060   * @throws Exception if doing the backup or an operation on the tables fails
061   */
062  @Test
063  public void testFullRestoreRemote() throws Exception {
064    LOG.info("test remote full backup on a single table");
065    String backupId =
066      backupTables(BackupType.FULL, toList(table1.getNameAsString()), BACKUP_REMOTE_ROOT_DIR);
067    LOG.info("backup complete");
068    TableName[] tableset = new TableName[] { table1 };
069    TableName[] tablemap = new TableName[] { table1_restore };
070    getBackupAdmin().restore(BackupUtils.createRestoreRequest(BACKUP_REMOTE_ROOT_DIR, backupId,
071      false, tableset, tablemap, false));
072    Admin hba = TEST_UTIL.getAdmin();
073    assertTrue(hba.tableExists(table1_restore));
074    TEST_UTIL.deleteTable(table1_restore);
075    hba.close();
076  }
077
078  /**
079   * Verify that restore jobs can be run on a standalone mapreduce cluster. Ensures hfiles output
080   * via {@link MapReduceHFileSplitterJob} exist on correct filesystem.
081   * @throws Exception if doing the backup or an operation on the tables fails
082   */
083  @Test
084  public void testFullRestoreRemoteWithAlternateRestoreOutputDir() throws Exception {
085    LOG.info("test remote full backup on a single table with alternate restore output dir");
086    String backupId =
087      backupTables(BackupType.FULL, toList(table1.getNameAsString()), BACKUP_REMOTE_ROOT_DIR);
088    LOG.info("backup complete");
089    TableName[] tableset = new TableName[] { table1 };
090    TableName[] tablemap = new TableName[] { table1_restore };
091
092    HBaseTestingUtil mrTestUtil = new HBaseTestingUtil();
093    mrTestUtil.setZkCluster(TEST_UTIL.getZkCluster());
094    mrTestUtil.startMiniDFSCluster(3);
095    mrTestUtil.startMiniMapReduceCluster();
096
097    Configuration testUtilConf = TEST_UTIL.getConnection().getConfiguration();
098    Configuration conf = new Configuration(mrTestUtil.getConfiguration());
099    conf.set(HConstants.ZOOKEEPER_ZNODE_PARENT,
100      testUtilConf.get(HConstants.ZOOKEEPER_ZNODE_PARENT));
101    conf.set(HConstants.MASTER_ADDRS_KEY, testUtilConf.get(HConstants.MASTER_ADDRS_KEY));
102
103    new BackupAdminImpl(ConnectionFactory.createConnection(conf))
104      .restore(new RestoreRequest.Builder().withBackupRootDir(BACKUP_REMOTE_ROOT_DIR)
105        .withRestoreRootDir(BACKUP_ROOT_DIR).withBackupId(backupId).withCheck(false)
106        .withFromTables(tableset).withToTables(tablemap).withOverwrite(false).build());
107
108    Path hfileOutputPath = new Path(
109      new Path(conf.get(MapReduceHFileSplitterJob.BULK_OUTPUT_CONF_KEY)).toUri().getPath());
110
111    // files exist on hbase cluster
112    FileSystem fileSystem = FileSystem.get(TEST_UTIL.getConfiguration());
113    assertTrue(fileSystem.exists(hfileOutputPath));
114
115    // files don't exist on MR cluster
116    fileSystem = FileSystem.get(conf);
117    assertFalse(fileSystem.exists(hfileOutputPath));
118
119    Admin hba = TEST_UTIL.getAdmin();
120    assertTrue(hba.tableExists(table1_restore));
121    TEST_UTIL.deleteTable(table1_restore);
122    hba.close();
123  }
124}