/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.backup;

import static org.junit.Assert.assertTrue;

import java.util.List;
import org.apache.hadoop.hbase.HBaseClassTestRule;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.backup.impl.BackupAdminImpl;
import org.apache.hadoop.hbase.backup.util.BackupUtils;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.testclassification.LargeTests;
import org.junit.Assert;
import org.junit.ClassRule;
import org.junit.Test;
import org.junit.experimental.categories.Category;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import org.apache.hbase.thirdparty.com.google.common.collect.Lists;

@Category(LargeTests.class)
public class TestBackupMerge extends TestBackupBase {

  @ClassRule
  public static final HBaseClassTestRule CLASS_RULE =
      HBaseClassTestRule.forClass(TestBackupMerge.class);

  private static final Logger LOG = LoggerFactory.getLogger(TestBackupMerge.class);

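  /**
   * Takes a full backup of table1 and table2, runs two incremental backups after additional
   * writes, merges the two incremental images, and restores the merged image to verify it
   * contains the rows from both incremental batches.
   */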
  @Test
  public void testIncBackupMergeRestore() throws Exception {
    int ADD_ROWS = 99;
    // #1 - create full backup for all tables
    LOG.info("create full backup image for all tables");

    List<TableName> tables = Lists.newArrayList(table1, table2);

    // Shared connection used by the backup client for all backup, merge and restore steps below.
    Connection conn = ConnectionFactory.createConnection(conf1);

    Admin admin = conn.getAdmin();
    BackupAdminImpl client = new BackupAdminImpl(conn);

    BackupRequest request = createBackupRequest(BackupType.FULL, tables, BACKUP_ROOT_DIR);
    String backupIdFull = client.backupTables(request);

    assertTrue(checkSucceeded(backupIdFull));

    // #2 - insert some data into table1 and table2
    LOG.debug("writing " + ADD_ROWS + " rows to " + table1);
    Table t1 = insertIntoTable(conn, table1, famName, 1, ADD_ROWS);

    Assert.assertEquals(NB_ROWS_IN_BATCH + ADD_ROWS, TEST_UTIL.countRows(t1));
    t1.close();
    LOG.debug("written " + ADD_ROWS + " rows to " + table1);

    Table t2 = insertIntoTable(conn, table2, famName, 1, ADD_ROWS);

    Assert.assertEquals(NB_ROWS_IN_BATCH + ADD_ROWS, TEST_UTIL.countRows(t2));
    t2.close();
    LOG.debug("written " + ADD_ROWS + " rows to " + table2);

    // #3 - first incremental backup for multiple tables
    tables = Lists.newArrayList(table1, table2);
    request = createBackupRequest(BackupType.INCREMENTAL, tables, BACKUP_ROOT_DIR);
    String backupIdIncMultiple = client.backupTables(request);

    assertTrue(checkSucceeded(backupIdIncMultiple));

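    // #4 - insert more data into both tables so the second incremental backup captures new edits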
    t1 = insertIntoTable(conn, table1, famName, 2, ADD_ROWS);
    t1.close();

    t2 = insertIntoTable(conn, table2, famName, 2, ADD_ROWS);
    t2.close();

    // #5 - second incremental backup for multiple tables
    request = createBackupRequest(BackupType.INCREMENTAL, tables, BACKUP_ROOT_DIR);
    String backupIdIncMultiple2 = client.backupTables(request);
    assertTrue(checkSucceeded(backupIdIncMultiple2));

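    // #6 - merge the two incremental backup images into a single image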
    try (BackupAdmin bAdmin = new BackupAdminImpl(conn)) {
      String[] backups = new String[] { backupIdIncMultiple, backupIdIncMultiple2 };
      bAdmin.mergeBackups(backups);
    }

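    // The merged result is restored below through backupIdIncMultiple2, the most recent of the
    // merged backup ids.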
    // #7 - restore the merged backup image for multiple tables, with overwrite
    TableName[] tablesRestoreIncMultiple = new TableName[] { table1, table2 };
    TableName[] tablesMapIncMultiple = new TableName[] { table1_restore, table2_restore };
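    // Each source table is mapped to its *_restore counterpart so the original tables are left
    // untouched.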
    client.restore(BackupUtils.createRestoreRequest(BACKUP_ROOT_DIR, backupIdIncMultiple2, false,
      tablesRestoreIncMultiple, tablesMapIncMultiple, true));

    Table hTable = conn.getTable(table1_restore);
    LOG.debug("After incremental restore: " + hTable.getDescriptor());
    int countRows = TEST_UTIL.countRows(hTable, famName);
    LOG.debug("f1 has " + countRows + " rows");
    Assert.assertEquals(NB_ROWS_IN_BATCH + 2 * ADD_ROWS, countRows);

    hTable.close();

    hTable = conn.getTable(table2_restore);
    Assert.assertEquals(NB_ROWS_IN_BATCH + 2 * ADD_ROWS, TEST_UTIL.countRows(hTable));
    hTable.close();

    admin.close();
    conn.close();
  }
}