/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.backup;

import static org.junit.Assert.assertTrue;

import java.util.List;
import java.util.Map;
import org.apache.hadoop.hbase.HBaseClassTestRule;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.backup.impl.BackupAdminImpl;
import org.apache.hadoop.hbase.backup.impl.BackupSystemTable;
import org.apache.hadoop.hbase.backup.util.BackupUtils;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.testclassification.LargeTests;
import org.apache.hadoop.hbase.tool.TestBulkLoadHFiles;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.Pair;
import org.junit.Assert;
import org.junit.ClassRule;
import org.junit.Test;
import org.junit.experimental.categories.Category;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import org.apache.hbase.thirdparty.com.google.common.collect.Lists;

/**
 * Test cycle:
 * <ol>
 *   <li>Create table t1</li>
 *   <li>Load data to t1</li>
 *   <li>Full backup t1</li>
 *   <li>Load data to t1</li>
 *   <li>Bulk load into t1</li>
 *   <li>Incremental backup t1</li>
 * </ol>
 */
@Category(LargeTests.class)
public class TestIncrementalBackupWithBulkLoad extends TestBackupBase {

  @ClassRule
  public static final HBaseClassTestRule CLASS_RULE =
    HBaseClassTestRule.forClass(TestIncrementalBackupWithBulkLoad.class);

  private static final Logger LOG =
    LoggerFactory.getLogger(TestIncrementalBackupWithBulkLoad.class);

  // Implement all test cases in one test, since incremental backup/restore has dependencies.
  @Test
  public void testIncBackupWithBulkLoad() throws Exception {
    String testName = "TestIncBackupWithBulkLoad";
    // #1 - create full backup for all tables
    LOG.info("create full backup image for all tables");

    List<TableName> tables = Lists.newArrayList(table1);
    Connection conn = ConnectionFactory.createConnection(conf1);
    Admin admin = conn.getAdmin();
    BackupAdminImpl client = new BackupAdminImpl(conn);

    BackupRequest request = createBackupRequest(BackupType.FULL, tables, BACKUP_ROOT_DIR);
    String backupIdFull = client.backupTables(request);

    assertTrue(checkSucceeded(backupIdFull));

    // #2 - insert some data into table1
    Table t1 = conn.getTable(table1);
    Put p1;
    for (int i = 0; i < NB_ROWS_IN_BATCH; i++) {
      p1 = new Put(Bytes.toBytes("row-t1" + i));
      p1.addColumn(famName, qualName, Bytes.toBytes("val" + i));
      t1.put(p1);
    }

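    // table1 already holds NB_ROWS_IN_BATCH rows loaded during test setup, so after the
    // puts above the table should contain NB_ROWS_IN_BATCH * 2 rows in total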
    Assert.assertEquals(NB_ROWS_IN_BATCH * 2, TEST_UTIL.countRows(t1));
    t1.close();

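    // Bulk load HFiles covering the key ranges below into table1; loadHFiles returns the
    // number of rows it added, which the later row-count assertions depend on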
    int NB_ROWS2 = 20;
    LOG.debug("bulk loading into " + testName);
    int actual =
      TestBulkLoadHFiles.loadHFiles(testName, table1Desc, TEST_UTIL, famName, qualName, false, null,
        new byte[][][] { new byte[][] { Bytes.toBytes("aaaa"), Bytes.toBytes("cccc") },
          new byte[][] { Bytes.toBytes("ddd"), Bytes.toBytes("ooo") }, },
        true, false, true, NB_ROWS_IN_BATCH * 2, NB_ROWS2);

    // #3 - incremental backup for table1
    tables = Lists.newArrayList(table1);
    request = createBackupRequest(BackupType.INCREMENTAL, tables, BACKUP_ROOT_DIR);
    String backupIdIncMultiple = client.backupTables(request);
    assertTrue(checkSucceeded(backupIdIncMultiple));
    // #4 - bulk load again
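    // The second load uses key ranges disjoint from the first, so the row counts stay additive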
    LOG.debug("bulk loading into " + testName);
    int actual1 =
      TestBulkLoadHFiles.loadHFiles(testName, table1Desc, TEST_UTIL, famName, qualName, false, null,
        new byte[][][] { new byte[][] { Bytes.toBytes("ppp"), Bytes.toBytes("qqq") },
          new byte[][] { Bytes.toBytes("rrr"), Bytes.toBytes("sss") }, },
        true, false, true, NB_ROWS_IN_BATCH * 2 + actual, NB_ROWS2);

    // #5 - incremental backup for table1
    tables = Lists.newArrayList(table1);
    request = createBackupRequest(BackupType.INCREMENTAL, tables, BACKUP_ROOT_DIR);
    String backupIdIncMultiple1 = client.backupTables(request);
    assertTrue(checkSucceeded(backupIdIncMultiple1));
    // Delete all data in table1
    TEST_UTIL.deleteTableData(table1);

    // #6 - restore incremental backup for table1
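    // restore in place (source and target table names are the same) with overwrite enabled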
    TableName[] tablesRestoreIncMultiple = new TableName[] { table1 };
    client.restore(BackupUtils.createRestoreRequest(BACKUP_ROOT_DIR, backupIdIncMultiple1, false,
      tablesRestoreIncMultiple, tablesRestoreIncMultiple, true));

    Table hTable = conn.getTable(table1);
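    // restoring the second incremental image brings back both batches of puts and both bulk loads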
    Assert.assertEquals(NB_ROWS_IN_BATCH * 2 + actual + actual1, TEST_UTIL.countRows(hTable));
    request = createBackupRequest(BackupType.FULL, tables, BACKUP_ROOT_DIR);

    backupIdFull = client.backupTables(request);
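    // A successful backup should clear the bulk load records tracked for these tables in the
    // backup system table; verify that none remain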
    try (final BackupSystemTable table = new BackupSystemTable(conn)) {
      Pair<Map<TableName, Map<String, Map<String, List<Pair<String, Boolean>>>>>,
        List<byte[]>> pair = table.readBulkloadRows(tables);
      assertTrue("bulk load records still present: " + pair.getSecond().size(),
        pair.getSecond().isEmpty());
    }
    assertTrue(checkSucceeded(backupIdFull));

    hTable.close();
    admin.close();
    conn.close();
  }
}