/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.backup;

import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertThrows;
import static org.junit.jupiter.api.Assertions.assertTrue;

import java.io.IOException;
import java.util.HashSet;
import java.util.List;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseTestingUtil;
import org.apache.hadoop.hbase.SingleProcessHBaseCluster;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.backup.impl.BackupAdminImpl;
import org.apache.hadoop.hbase.backup.impl.BackupManifest;
import org.apache.hadoop.hbase.backup.util.BackupUtils;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.regionserver.HRegion;
import org.apache.hadoop.hbase.testclassification.LargeTests;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
import org.junit.jupiter.api.Tag;
import org.junit.jupiter.api.Test;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import org.apache.hbase.thirdparty.com.google.common.collect.Lists;
import org.apache.hbase.thirdparty.com.google.common.collect.Sets;

@Tag(LargeTests.TAG)
public class TestIncrementalBackupRestore extends IncrementalBackupRestoreTestBase {

  private static final Logger LOG = LoggerFactory.getLogger(TestIncrementalBackupRestore.class);

  // All scenarios are exercised in a single test method because the incremental
  // backup/restore steps build on one another and must run in order.
  @Test
  public void testIncBackupRestore() throws Exception {
    int ADD_ROWS = 99;

    // #1 - create full backup for all tables
    LOG.info("create full backup image for all tables");
    List<TableName> tables = Lists.newArrayList(table1, table2);
    final byte[] fam3Name = Bytes.toBytes("f3");
    final byte[] mobName = Bytes.toBytes("mob");

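    // Add a regular family (f3) and a MOB-enabled family to table1. With a 5-byte MOB
    // threshold, values larger than 5 bytes are stored as MOB data, so the backup and
    // restore steps below also exercise the MOB path.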
    TableDescriptor newTable1Desc = TableDescriptorBuilder.newBuilder(table1Desc)
      .setColumnFamily(ColumnFamilyDescriptorBuilder.of(fam3Name))
      .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(mobName).setMobEnabled(true)
        .setMobThreshold(5L).build())
      .build();
    TEST_UTIL.getAdmin().modifyTable(newTable1Desc);

    try (Connection conn = ConnectionFactory.createConnection(conf1);
      Admin admin = conn.getAdmin()) {
      int NB_ROWS_FAM3 = 6;
      insertIntoTable(conn, table1, fam3Name, 3, NB_ROWS_FAM3).close();
      insertIntoTable(conn, table1, mobName, 3, NB_ROWS_FAM3).close();

      BackupAdminImpl client = new BackupAdminImpl(conn);
      String backupIdFull = takeFullBackup(tables, client);
      validateRootPathCanBeOverridden(BACKUP_ROOT_DIR, backupIdFull);
      assertTrue(checkSucceeded(backupIdFull));

      // #2 - insert some data to table
      Table t1 = insertIntoTable(conn, table1, famName, 1, ADD_ROWS);
      LOG.debug("writing {} rows to {}", ADD_ROWS, table1);
      assertEquals(NB_ROWS_IN_BATCH + ADD_ROWS + NB_ROWS_FAM3, HBaseTestingUtil.countRows(t1));
      LOG.debug("written {} rows to {}", ADD_ROWS, table1);
      // additionally, insert rows to the MOB cf
      int NB_ROWS_MOB = 111;
      insertIntoTable(conn, table1, mobName, 3, NB_ROWS_MOB);
      LOG.debug("written {} rows to MOB-enabled CF of {}", NB_ROWS_MOB, table1);
      assertEquals(NB_ROWS_IN_BATCH + ADD_ROWS + NB_ROWS_MOB, HBaseTestingUtil.countRows(t1));
      t1.close();
      Table t2 = conn.getTable(table2);
      Put p2;
      for (int i = 0; i < 5; i++) {
        p2 = new Put(Bytes.toBytes("row-t2" + i));
        p2.addColumn(famName, qualName, Bytes.toBytes("val" + i));
        t2.put(p2);
      }
      assertEquals(NB_ROWS_IN_BATCH + 5, HBaseTestingUtil.countRows(t2));
      t2.close();
      LOG.debug("written 5 rows to {}", table2);
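      // Split table1 so its region layout changes between the full and the incremental
      // backup; the incremental backup/restore path must handle images taken across a
      // region split.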
      // split table1
      SingleProcessHBaseCluster cluster = TEST_UTIL.getHBaseCluster();
      List<HRegion> regions = cluster.getRegions(table1);
      byte[] name = regions.get(0).getRegionInfo().getRegionName();
      long startSplitTime = EnvironmentEdgeManager.currentTime();
      try {
        admin.splitRegionAsync(name).get();
      } catch (Exception e) {
        // Even if the split fails, it does not affect the checks that follow;
        // just log the exception and continue.
        LOG.debug("region is not splittable", e);
      }
      TEST_UTIL.waitTableAvailable(table1);
      long endSplitTime = EnvironmentEdgeManager.currentTime();
      // split finished
      LOG.debug("split finished in {} ms", endSplitTime - startSplitTime);

      // #3 - incremental backup for multiple tables
      tables = Lists.newArrayList(table1, table2);
      BackupRequest request = createBackupRequest(BackupType.INCREMENTAL, tables, BACKUP_ROOT_DIR);
      String backupIdIncMultiple = client.backupTables(request);
      assertTrue(checkSucceeded(backupIdIncMultiple));
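      // The manifest of the incremental image should record exactly the tables that were
      // included in the backup request.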
      BackupManifest manifest =
        HBackupFileSystem.getManifest(conf1, new Path(BACKUP_ROOT_DIR), backupIdIncMultiple);
      assertEquals(Sets.newHashSet(table1, table2), new HashSet<>(manifest.getTableList()));
      validateRootPathCanBeOverridden(BACKUP_ROOT_DIR, backupIdIncMultiple);

      // add column family f2 to table1
      // drop column family f3
      final byte[] fam2Name = Bytes.toBytes("f2");
      newTable1Desc = TableDescriptorBuilder.newBuilder(newTable1Desc)
        .setColumnFamily(ColumnFamilyDescriptorBuilder.of(fam2Name)).removeColumnFamily(fam3Name)
        .build();
      TEST_UTIL.getAdmin().modifyTable(newTable1Desc);

      // check that an incremental backup fails because the CFs don't match
      final List<TableName> tablesCopy = tables;
      IOException ex = assertThrows(IOException.class, () -> client
        .backupTables(createBackupRequest(BackupType.INCREMENTAL, tablesCopy, BACKUP_ROOT_DIR)));
      checkThrowsCFMismatch(ex, List.of(table1));
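      // Take a fresh full backup to re-establish the baseline with the current column
      // families, so that the incremental backups below can succeed again.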
      takeFullBackup(tables, client);

      int NB_ROWS_FAM2 = 7;
      Table t3 = insertIntoTable(conn, table1, fam2Name, 2, NB_ROWS_FAM2);
      t3.close();

      // Wait for 5 sec to make sure that old WALs were deleted
      Thread.sleep(5000);

      // #4 - additional incremental backup for multiple tables
      request = createBackupRequest(BackupType.INCREMENTAL, tables, BACKUP_ROOT_DIR);
      String backupIdIncMultiple2 = client.backupTables(request);
      assertTrue(checkSucceeded(backupIdIncMultiple2));
      validateRootPathCanBeOverridden(BACKUP_ROOT_DIR, backupIdIncMultiple2);

      // #5 - restore full backup for all tables
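      // Restore into the *_restore tables so the original tables are left untouched.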
      TableName[] tablesRestoreFull = new TableName[] { table1, table2 };
      TableName[] tablesMapFull = new TableName[] { table1_restore, table2_restore };

      LOG.debug("Restoring full backup {}", backupIdFull);
      client.restore(BackupUtils.createRestoreRequest(BACKUP_ROOT_DIR, backupIdFull, false,
        tablesRestoreFull, tablesMapFull, true));

      // #6.1 - check tables for full restore
      Admin hAdmin = TEST_UTIL.getAdmin();
      assertTrue(hAdmin.tableExists(table1_restore));
      assertTrue(hAdmin.tableExists(table2_restore));

      // #6.2 - checking row count of tables for full restore
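      // The full image was taken before the ADD_ROWS and MOB inserts, so only the
      // original batch plus the f3 rows are expected in table1.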
      try (Table hTable = conn.getTable(table1_restore)) {
        assertEquals(NB_ROWS_IN_BATCH + NB_ROWS_FAM3, HBaseTestingUtil.countRows(hTable));
      }

      try (Table hTable = conn.getTable(table2_restore)) {
        assertEquals(NB_ROWS_IN_BATCH, HBaseTestingUtil.countRows(hTable));
      }

      // #7 - restore incremental backup for multiple tables, with overwrite
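      // Restoring the newest incremental image also replays its dependent full backup,
      // so all rows written up to backupIdIncMultiple2 should be present.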
      TableName[] tablesRestoreIncMultiple = new TableName[] { table1, table2 };
      TableName[] tablesMapIncMultiple = new TableName[] { table1_restore, table2_restore };
      client.restore(BackupUtils.createRestoreRequest(BACKUP_ROOT_DIR, backupIdIncMultiple2, false,
        tablesRestoreIncMultiple, tablesMapIncMultiple, true));
      try (Table hTable = conn.getTable(table1_restore)) {
        LOG.debug("After incremental restore: {}", hTable.getDescriptor());
        int countFamName = HBaseTestingUtil.countRows(hTable, famName);
        LOG.debug("f1 has {} rows", countFamName);
        assertEquals(NB_ROWS_IN_BATCH + ADD_ROWS, countFamName);

        int countFam2Name = HBaseTestingUtil.countRows(hTable, fam2Name);
        LOG.debug("f2 has {} rows", countFam2Name);
        assertEquals(NB_ROWS_FAM2, countFam2Name);

        int countMobName = HBaseTestingUtil.countRows(hTable, mobName);
        LOG.debug("mob has {} rows", countMobName);
        assertEquals(NB_ROWS_MOB, countMobName);
      }

      try (Table hTable = conn.getTable(table2_restore)) {
        assertEquals(NB_ROWS_IN_BATCH + 5, HBaseTestingUtil.countRows(hTable));
      }
    }
  }
}