/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.backup;

import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertTrue;

import java.io.File;
import java.util.List;
import org.apache.hadoop.hbase.HBaseClassTestRule;
import org.apache.hadoop.hbase.HBaseTestingUtil;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.backup.impl.BackupAdminImpl;
import org.apache.hadoop.hbase.backup.util.BackupUtils;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.testclassification.LargeTests;
import org.junit.Assert;
import org.junit.ClassRule;
import org.junit.Test;
import org.junit.experimental.categories.Category;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import org.apache.hbase.thirdparty.com.google.common.collect.Lists;

@Category(LargeTests.class)
public class TestBackupMerge extends TestBackupBase {

  @ClassRule
  public static final HBaseClassTestRule CLASS_RULE =
    HBaseClassTestRule.forClass(TestBackupMerge.class);

  private static final Logger LOG = LoggerFactory.getLogger(TestBackupMerge.class);

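  /**
   * Takes a full backup and two incremental backups of table1 and table2, merges the two
   * incremental images into one, and then restores from the merged image, verifying that the
   * restored tables contain the initial rows plus both incremental batches.
   */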
  @Test
  public void testIncBackupMergeRestore() throws Exception {
    int ADD_ROWS = 99;
    // #1 - create full backup for all tables
    LOG.info("create full backup image for all tables");

    List<TableName> tables = Lists.newArrayList(table1, table2);

    Connection conn = ConnectionFactory.createConnection(conf1);

    Admin admin = conn.getAdmin();
    BackupAdminImpl client = new BackupAdminImpl(conn);

    BackupRequest request = createBackupRequest(BackupType.FULL, tables, BACKUP_ROOT_DIR);
    String backupIdFull = client.backupTables(request);

    assertTrue(checkSucceeded(backupIdFull));

    // #2 - insert some data to table1 and table2
    Table t1 = insertIntoTable(conn, table1, famName, 1, ADD_ROWS);
    LOG.debug("writing {} rows to {}", ADD_ROWS, table1);

    Assert.assertEquals(NB_ROWS_IN_BATCH + ADD_ROWS, HBaseTestingUtil.countRows(t1));
    t1.close();
    LOG.debug("written {} rows to {}", ADD_ROWS, table1);

    Table t2 = insertIntoTable(conn, table2, famName, 1, ADD_ROWS);

    Assert.assertEquals(NB_ROWS_IN_BATCH + ADD_ROWS, HBaseTestingUtil.countRows(t2));
    t2.close();
    LOG.debug("written {} rows to {}", ADD_ROWS, table2);

    // #3 - incremental backup for multiple tables
    tables = Lists.newArrayList(table1, table2);
    request = createBackupRequest(BackupType.INCREMENTAL, tables, BACKUP_ROOT_DIR);
    String backupIdIncMultiple = client.backupTables(request);

    assertTrue(checkSucceeded(backupIdIncMultiple));

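    // #4 - add a second batch of rows to both tables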
    t1 = insertIntoTable(conn, table1, famName, 2, ADD_ROWS);
    t1.close();

    t2 = insertIntoTable(conn, table2, famName, 2, ADD_ROWS);
    t2.close();

    // #5 - second incremental backup for multiple tables
    request = createBackupRequest(BackupType.INCREMENTAL, tables, BACKUP_ROOT_DIR);
    String backupIdIncMultiple2 = client.backupTables(request);
    assertTrue(checkSucceeded(backupIdIncMultiple2));

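    // #6 - merge the two incremental backup images; backupIdIncMultiple2 becomes the merged image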
    try (BackupAdmin bAdmin = new BackupAdminImpl(conn)) {
      String[] backups = new String[] { backupIdIncMultiple, backupIdIncMultiple2 };
      bAdmin.mergeBackups(backups);
    }

    // #7 - restore incremental backup for multiple tables, with overwrite
    TableName[] tablesRestoreIncMultiple = new TableName[] { table1, table2 };
    TableName[] tablesMapIncMultiple = new TableName[] { table1_restore, table2_restore };
    client.restore(BackupUtils.createRestoreRequest(BACKUP_ROOT_DIR, backupIdIncMultiple2, false,
      tablesRestoreIncMultiple, tablesMapIncMultiple, true));

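    // The restored tables should contain the initial batch plus both incremental batches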
    Table hTable = conn.getTable(table1_restore);
    LOG.debug("After incremental restore: {}", hTable.getDescriptor());
    int countRows = HBaseTestingUtil.countRows(hTable, famName);
    LOG.debug("{} has {} rows after restore", table1_restore, countRows);
    Assert.assertEquals(NB_ROWS_IN_BATCH + 2 * ADD_ROWS, countRows);

    hTable.close();

    hTable = conn.getTable(table2_restore);
    Assert.assertEquals(NB_ROWS_IN_BATCH + 2 * ADD_ROWS, HBaseTestingUtil.countRows(hTable));
    hTable.close();

    admin.close();
    conn.close();
  }

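  /**
   * Verifies that merging incremental backups works when the backup root directory lives on a
   * different filesystem (local file://) than the HBase root directory (see HBASE-28539), and
   * that after the merge only the full backup and the merged incremental image remain.
   */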
  @Test
  public void testIncBackupMergeRestoreSeparateFs() throws Exception {
    String originalBackupRoot = BACKUP_ROOT_DIR;
    // Prepare BACKUP_ROOT_DIR on a different filesystem from HBase.
    String backupTargetDir = TEST_UTIL.getDataTestDir("backupTarget").toString();
    BACKUP_ROOT_DIR = new File(backupTargetDir).toURI().toString();

    try (Connection conn = ConnectionFactory.createConnection(conf1)) {
      BackupAdminImpl client = new BackupAdminImpl(conn);
      List<TableName> tables = Lists.newArrayList(table1, table2);

      BackupRequest request = createBackupRequest(BackupType.FULL, tables, BACKUP_ROOT_DIR, true);
      String backupIdFull = client.backupTables(request);
      assertTrue(checkSucceeded(backupIdFull));

      request = createBackupRequest(BackupType.INCREMENTAL, tables, BACKUP_ROOT_DIR, true);
      String backupIdIncMultiple = client.backupTables(request);
      assertTrue(checkSucceeded(backupIdIncMultiple));

      request = createBackupRequest(BackupType.INCREMENTAL, tables, BACKUP_ROOT_DIR, true);
      String backupIdIncMultiple2 = client.backupTables(request);
      assertTrue(checkSucceeded(backupIdIncMultiple2));

      try (BackupAdmin bAdmin = new BackupAdminImpl(conn)) {
        String[] backups = new String[] { backupIdIncMultiple, backupIdIncMultiple2 };
        // this throws java.lang.IllegalArgumentException: Wrong FS prior to HBASE-28539
        bAdmin.mergeBackups(backups);
      }

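      // After the merge, the full backup and the merged (latest) incremental image remain;
      // the first incremental image has been folded into the merged one and deleted.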
      assertTrue(
        new File(HBackupFileSystem.getBackupPath(BACKUP_ROOT_DIR, backupIdFull).toUri()).exists());
      assertFalse(
        new File(HBackupFileSystem.getBackupPath(BACKUP_ROOT_DIR, backupIdIncMultiple).toUri())
          .exists());
      assertTrue(
        new File(HBackupFileSystem.getBackupPath(BACKUP_ROOT_DIR, backupIdIncMultiple2).toUri())
          .exists());
    } finally {
      BACKUP_ROOT_DIR = originalBackupRoot;
    }
  }
}