/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.backup;

import static org.junit.Assert.assertTrue;

import java.util.ArrayList;
import java.util.Collection;
import java.util.List;
import org.apache.hadoop.hbase.HBaseClassTestRule;
import org.apache.hadoop.hbase.HBaseTestingUtil;
import org.apache.hadoop.hbase.SingleProcessHBaseCluster;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.backup.impl.BackupAdminImpl;
import org.apache.hadoop.hbase.backup.util.BackupUtils;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.regionserver.HRegion;
import org.apache.hadoop.hbase.testclassification.LargeTests;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
import org.junit.Assert;
import org.junit.ClassRule;
import org.junit.Test;
import org.junit.experimental.categories.Category;
import org.junit.runner.RunWith;
import org.junit.runners.Parameterized;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import org.apache.hbase.thirdparty.com.google.common.collect.Lists;

@Category(LargeTests.class)
@RunWith(Parameterized.class)
public class TestIncrementalBackup extends TestBackupBase {

  @ClassRule
  public static final HBaseClassTestRule CLASS_RULE =
    HBaseClassTestRule.forClass(TestIncrementalBackup.class);

  private static final Logger LOG = LoggerFactory.getLogger(TestIncrementalBackup.class);

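  // Note: the suite is parameterized only to force a single run against the
  // "multiwal" WAL provider selected in data(); the static "provider" field is
  // assumed to be consumed by TestBackupBase when the mini cluster starts.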
  @Parameterized.Parameters
  public static Collection<Object[]> data() {
    provider = "multiwal";
    List<Object[]> params = new ArrayList<>();
    params.add(new Object[] { Boolean.TRUE });
    return params;
  }

  public TestIncrementalBackup(Boolean b) {
    // The parameter is intentionally unused; see the note above data().
  }

  // All scenarios run in a single test method because the incremental
  // backup/restore steps depend on one another.
  @Test
  public void testIncBackupRestore() throws Exception {
    int ADD_ROWS = 99;

    // #1 - create full backup for all tables
    LOG.info("create full backup image for all tables");
    List<TableName> tables = Lists.newArrayList(table1, table2);
    final byte[] fam3Name = Bytes.toBytes("f3");
    final byte[] mobName = Bytes.toBytes("mob");

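    // Add two extra column families to table1 before taking the full backup: a
    // plain family f3 and a MOB-enabled family. With a MOB threshold of 5
    // bytes, values larger than that are stored in separate MOB files, so the
    // backup and restore paths exercised below also cover MOB storage.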
    TableDescriptor newTable1Desc = TableDescriptorBuilder.newBuilder(table1Desc)
      .setColumnFamily(ColumnFamilyDescriptorBuilder.of(fam3Name))
      .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(mobName).setMobEnabled(true)
        .setMobThreshold(5L).build())
      .build();
    TEST_UTIL.getAdmin().modifyTable(newTable1Desc);

    try (Connection conn = ConnectionFactory.createConnection(conf1)) {
      int NB_ROWS_FAM3 = 6;
      insertIntoTable(conn, table1, fam3Name, 3, NB_ROWS_FAM3).close();
      insertIntoTable(conn, table1, mobName, 3, NB_ROWS_FAM3).close();
      Admin admin = conn.getAdmin();
      BackupAdminImpl client = new BackupAdminImpl(conn);
      BackupRequest request = createBackupRequest(BackupType.FULL, tables, BACKUP_ROOT_DIR);
      String backupIdFull = client.backupTables(request);
      assertTrue(checkSucceeded(backupIdFull));

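      // The full backup above is the baseline image. Later incremental backups
      // only capture changes recorded in the WALs since the previous backup,
      // which is why the extra rows, the region split and the schema changes
      // below are all made between backup requests.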
      // #2 - insert some data to table
      Table t1 = insertIntoTable(conn, table1, famName, 1, ADD_ROWS);
      LOG.debug("writing " + ADD_ROWS + " rows to " + table1);
      Assert.assertEquals(NB_ROWS_IN_BATCH + ADD_ROWS + NB_ROWS_FAM3,
        HBaseTestingUtil.countRows(t1));
      LOG.debug("written " + ADD_ROWS + " rows to " + table1);
      // additionally, insert rows into the MOB cf; the id-3 row keys written
      // for NB_ROWS_FAM3 above are assumed to be a subset of these, so they
      // are not double-counted in the assertion below
      int NB_ROWS_MOB = 111;
      insertIntoTable(conn, table1, mobName, 3, NB_ROWS_MOB);
      LOG.debug("written " + NB_ROWS_MOB + " rows to the MOB-enabled CF of " + table1);
      Assert.assertEquals(NB_ROWS_IN_BATCH + ADD_ROWS + NB_ROWS_MOB,
        HBaseTestingUtil.countRows(t1));
      t1.close();
      Table t2 = conn.getTable(table2);
      for (int i = 0; i < 5; i++) {
        Put p2 = new Put(Bytes.toBytes("row-t2" + i));
        p2.addColumn(famName, qualName, Bytes.toBytes("val" + i));
        t2.put(p2);
      }
      Assert.assertEquals(NB_ROWS_IN_BATCH + 5, HBaseTestingUtil.countRows(t2));
      t2.close();
      LOG.debug("written 5 rows to " + table2);
      // split table1
      SingleProcessHBaseCluster cluster = TEST_UTIL.getHBaseCluster();
      List<HRegion> regions = cluster.getRegions(table1);
      byte[] name = regions.get(0).getRegionInfo().getRegionName();
      long startSplitTime = EnvironmentEdgeManager.currentTime();
      try {
        admin.splitRegionAsync(name).get();
      } catch (Exception e) {
        // The split can fail if the region is not splittable; the checks below
        // do not depend on the split succeeding, so just log and continue.
        LOG.debug("region is not splittable, because " + e);
      }
      while (!admin.isTableAvailable(table1)) {
        Thread.sleep(100);
      }
      long endSplitTime = EnvironmentEdgeManager.currentTime();
      // split finished
      LOG.debug("split finished in " + (endSplitTime - startSplitTime) + " ms");

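      // Splitting a region between the full and incremental backups makes the
      // incremental pass track WAL entries across a region boundary change;
      // together with the multiwal provider this is presumably the main
      // regression surface this test guards.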
      // #3 - incremental backup for multiple tables
      tables = Lists.newArrayList(table1, table2);
      request = createBackupRequest(BackupType.INCREMENTAL, tables, BACKUP_ROOT_DIR);
      String backupIdIncMultiple = client.backupTables(request);
      assertTrue(checkSucceeded(backupIdIncMultiple));

      // add column family f2 to table1 and drop column family f3
      final byte[] fam2Name = Bytes.toBytes("f2");
      newTable1Desc = TableDescriptorBuilder.newBuilder(newTable1Desc)
        .setColumnFamily(ColumnFamilyDescriptorBuilder.of(fam2Name)).removeColumnFamily(fam3Name)
        .build();
      TEST_UTIL.getAdmin().modifyTable(newTable1Desc);

      int NB_ROWS_FAM2 = 7;
      Table t3 = insertIntoTable(conn, table1, fam2Name, 2, NB_ROWS_FAM2);
      t3.close();

      // Give the log cleaner time to delete the old WALs. This is a plain
      // time-based wait and may be a source of flakiness on slow hosts.
      Thread.sleep(5000);

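      // #4 below verifies that an incremental backup still succeeds after the
      // schema change, even once the WALs behind the previous backup may have
      // been cleaned up. Restore applies the latest table schema, which is
      // presumably why no f3 count is asserted at the end of the test.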
      // #4 - additional incremental backup for multiple tables
      request = createBackupRequest(BackupType.INCREMENTAL, tables, BACKUP_ROOT_DIR);
      String backupIdIncMultiple2 = client.backupTables(request);
      assertTrue(checkSucceeded(backupIdIncMultiple2));

      // #5 - restore full backup for all tables
      TableName[] tablesRestoreFull = new TableName[] { table1, table2 };
      TableName[] tablesMapFull = new TableName[] { table1_restore, table2_restore };

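      // The boolean arguments to createRestoreRequest are taken here to be
      // "check only" (false = actually restore) and "overwrite" (true =
      // replace target tables if present); restoring into the *_restore
      // tables keeps the originals intact for the later steps.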
      LOG.debug("Restoring full " + backupIdFull);
      client.restore(BackupUtils.createRestoreRequest(BACKUP_ROOT_DIR, backupIdFull, false,
        tablesRestoreFull, tablesMapFull, true));

      // #6.1 - check tables for full restore
      Admin hAdmin = TEST_UTIL.getAdmin();
      assertTrue(hAdmin.tableExists(table1_restore));
      assertTrue(hAdmin.tableExists(table2_restore));
      hAdmin.close();

      // #6.2 - checking row count of tables for full restore
      Table hTable = conn.getTable(table1_restore);
      Assert.assertEquals(NB_ROWS_IN_BATCH + NB_ROWS_FAM3, HBaseTestingUtil.countRows(hTable));
      hTable.close();

      hTable = conn.getTable(table2_restore);
      Assert.assertEquals(NB_ROWS_IN_BATCH, HBaseTestingUtil.countRows(hTable));
      hTable.close();

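      // Restoring the second incremental image is expected to replay its whole
      // ancestry (the full backup plus both incrementals), so the counts below
      // cover every insert made before backupIdIncMultiple2 was taken.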
      // #7 - restore incremental backup for multiple tables, with overwrite
      TableName[] tablesRestoreIncMultiple = new TableName[] { table1, table2 };
      TableName[] tablesMapIncMultiple = new TableName[] { table1_restore, table2_restore };
      client.restore(BackupUtils.createRestoreRequest(BACKUP_ROOT_DIR, backupIdIncMultiple2, false,
        tablesRestoreIncMultiple, tablesMapIncMultiple, true));
      hTable = conn.getTable(table1_restore);

      LOG.debug("After incremental restore: " + hTable.getDescriptor());
      int countFamName = TEST_UTIL.countRows(hTable, famName);
      LOG.debug("f1 has " + countFamName + " rows");
      Assert.assertEquals(NB_ROWS_IN_BATCH + ADD_ROWS, countFamName);

      int countFam2Name = TEST_UTIL.countRows(hTable, fam2Name);
      LOG.debug("f2 has " + countFam2Name + " rows");
      Assert.assertEquals(NB_ROWS_FAM2, countFam2Name);

      int countMobName = TEST_UTIL.countRows(hTable, mobName);
      LOG.debug("mob has " + countMobName + " rows");
      Assert.assertEquals(NB_ROWS_MOB, countMobName);
      hTable.close();

      hTable = conn.getTable(table2_restore);
      Assert.assertEquals(NB_ROWS_IN_BATCH + 5, HBaseTestingUtil.countRows(hTable));
      hTable.close();
      admin.close();
    }
  }
}