001/*
002 * Licensed to the Apache Software Foundation (ASF) under one
003 * or more contributor license agreements.  See the NOTICE file
004 * distributed with this work for additional information
005 * regarding copyright ownership.  The ASF licenses this file
006 * to you under the Apache License, Version 2.0 (the
007 * "License"); you may not use this file except in compliance
008 * with the License.  You may obtain a copy of the License at
009 *
010 *     http://www.apache.org/licenses/LICENSE-2.0
011 *
012 * Unless required by applicable law or agreed to in writing, software
013 * distributed under the License is distributed on an "AS IS" BASIS,
014 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
015 * See the License for the specific language governing permissions and
016 * limitations under the License.
017 */
018package org.apache.hadoop.hbase.backup;
019
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertFalse;
import static org.junit.jupiter.api.Assertions.assertNotEquals;
import static org.junit.jupiter.api.Assertions.assertTrue;
023
024import java.util.List;
025import org.apache.hadoop.hbase.TableName;
026import org.apache.hadoop.hbase.backup.BackupInfo.BackupState;
027import org.apache.hadoop.hbase.backup.impl.BackupAdminImpl;
028import org.apache.hadoop.hbase.backup.impl.BackupSystemTable;
029import org.apache.hadoop.hbase.backup.impl.TableBackupClient;
030import org.apache.hadoop.hbase.backup.impl.TableBackupClient.Stage;
031import org.apache.hadoop.hbase.client.Admin;
032import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
033import org.apache.hadoop.hbase.client.Connection;
034import org.apache.hadoop.hbase.client.ConnectionFactory;
035import org.apache.hadoop.hbase.client.Put;
036import org.apache.hadoop.hbase.client.Table;
037import org.apache.hadoop.hbase.client.TableDescriptor;
038import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
039import org.apache.hadoop.hbase.testclassification.LargeTests;
040import org.apache.hadoop.hbase.util.Bytes;
041import org.apache.hadoop.util.ToolRunner;
042import org.junit.jupiter.api.Tag;
043import org.junit.jupiter.api.Test;
044import org.slf4j.Logger;
045import org.slf4j.LoggerFactory;
046
047import org.apache.hbase.thirdparty.com.google.common.collect.Lists;
048
049@Tag(LargeTests.TAG)
050public class TestIncrementalBackupWithFailures extends TestBackupBase {
051
052  private static final Logger LOG =
053    LoggerFactory.getLogger(TestIncrementalBackupWithFailures.class);
054
055  static {
056    provider = "multiwal";
057  }
058
059  // implement all test cases in 1 test since incremental backup/restore has dependencies
060  @Test
061  public void testIncBackupRestore() throws Exception {
062    int ADD_ROWS = 99;
063    // #1 - create full backup for all tables
064    LOG.info("create full backup image for all tables");
065
066    List<TableName> tables = Lists.newArrayList(table1, table2);
067    final byte[] fam3Name = Bytes.toBytes("f3");
068    TableDescriptor newTable1Desc = TableDescriptorBuilder.newBuilder(table1Desc)
069      .setColumnFamily(ColumnFamilyDescriptorBuilder.of(fam3Name)).build();
070    TEST_UTIL.getAdmin().modifyTable(newTable1Desc);
071
072    Connection conn = ConnectionFactory.createConnection(conf1);
073    int NB_ROWS_FAM3 = 6;
074    insertIntoTable(conn, table1, fam3Name, 3, NB_ROWS_FAM3).close();
075
076    Admin admin = conn.getAdmin();
077    BackupAdminImpl client = new BackupAdminImpl(conn);
078
079    BackupRequest request = createBackupRequest(BackupType.FULL, tables, BACKUP_ROOT_DIR);
080    String backupIdFull = client.backupTables(request);
081
082    assertTrue(checkSucceeded(backupIdFull));
083
084    // #2 - insert some data to table
085    Table t1 = insertIntoTable(conn, table1, famName, 1, ADD_ROWS);
086    LOG.debug("writing " + ADD_ROWS + " rows to " + table1);
087
088    assertEquals(TEST_UTIL.countRows(t1), NB_ROWS_IN_BATCH + ADD_ROWS + NB_ROWS_FAM3);
089    t1.close();
090    LOG.debug("written " + ADD_ROWS + " rows to " + table1);
091
092    Table t2 = conn.getTable(table2);
093    Put p2;
094    for (int i = 0; i < 5; i++) {
095      p2 = new Put(Bytes.toBytes("row-t2" + i));
096      p2.addColumn(famName, qualName, Bytes.toBytes("val" + i));
097      t2.put(p2);
098    }
099
100    assertEquals(TEST_UTIL.countRows(t2), NB_ROWS_IN_BATCH + 5);
101    t2.close();
102    LOG.debug("written " + 5 + " rows to " + table2);
103
104    // #3 - incremental backup for multiple tables
105    incrementalBackupWithFailures();
106
107    admin.close();
108    conn.close();
109
110  }
111
112  private void incrementalBackupWithFailures() throws Exception {
113    conf1.set(TableBackupClient.BACKUP_CLIENT_IMPL_CLASS,
114      IncrementalTableBackupClientForTest.class.getName());
115    int maxStage = Stage.values().length - 1;
116    // Fail stages between 0 and 4 inclusive
117    for (int stage = 0; stage <= maxStage; stage++) {
118      LOG.info("Running stage " + stage);
119      runBackupAndFailAtStage(stage);
120    }
121  }
122
123  private void runBackupAndFailAtStage(int stage) throws Exception {
124
125    conf1.setInt(FullTableBackupClientForTest.BACKUP_TEST_MODE_STAGE, stage);
126    try (BackupSystemTable table = new BackupSystemTable(TEST_UTIL.getConnection())) {
127      int before = table.getBackupHistory().size();
128      String[] args = new String[] { "create", "incremental", BACKUP_ROOT_DIR, "-t",
129        table1.getNameAsString() + "," + table2.getNameAsString() };
130      // Run backup
131      int ret = ToolRunner.run(conf1, new BackupDriver(), args);
132      assertFalse(ret == 0);
133      List<BackupInfo> backups = table.getBackupHistory();
134      int after = table.getBackupHistory().size();
135
136      assertTrue(after == before + 1);
137      for (BackupInfo data : backups) {
138        if (data.getType() == BackupType.FULL) {
139          assertTrue(data.getState() == BackupState.COMPLETE);
140        } else {
141          assertTrue(data.getState() == BackupState.FAILED);
142        }
143      }
144    }
145  }
146
147}