/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.backup;

import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertNotEquals;
import static org.junit.Assert.assertThrows;
import static org.junit.Assert.assertTrue;

import java.io.File;
import java.io.IOException;
import java.nio.ByteBuffer;
import java.util.ArrayList;
import java.util.Collection;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.UUID;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.LocatedFileStatus;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.RemoteIterator;
import org.apache.hadoop.hbase.HBaseClassTestRule;
import org.apache.hadoop.hbase.HBaseTestingUtil;
import org.apache.hadoop.hbase.SingleProcessHBaseCluster;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.backup.impl.BackupAdminImpl;
import org.apache.hadoop.hbase.backup.impl.BackupManifest;
import org.apache.hadoop.hbase.backup.impl.ColumnFamilyMismatchException;
import org.apache.hadoop.hbase.backup.util.BackupUtils;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.regionserver.HRegion;
import org.apache.hadoop.hbase.regionserver.LogRoller;
import org.apache.hadoop.hbase.testclassification.LargeTests;
import org.apache.hadoop.hbase.tool.BulkLoadHFiles;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.CommonFSUtils;
import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
import org.apache.hadoop.hbase.util.HFileArchiveUtil;
import org.apache.hadoop.hbase.util.HFileTestUtil;
import org.junit.After;
import org.junit.Assert;
import org.junit.ClassRule;
import org.junit.Test;
import org.junit.experimental.categories.Category;
import org.junit.runner.RunWith;
import org.junit.runners.Parameterized;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import org.apache.hbase.thirdparty.com.google.common.base.Throwables;
import org.apache.hbase.thirdparty.com.google.common.collect.Lists;
import org.apache.hbase.thirdparty.com.google.common.collect.Sets;

@Category(LargeTests.class)
@RunWith(Parameterized.class)
public class TestIncrementalBackup extends TestBackupBase {

  @ClassRule
  public static final HBaseClassTestRule CLASS_RULE =
    HBaseClassTestRule.forClass(TestIncrementalBackup.class);

  private static final Logger LOG = LoggerFactory.getLogger(TestIncrementalBackup.class);
  private static final byte[] BULKLOAD_START_KEY = new byte[] { 0x00 };
  private static final byte[] BULKLOAD_END_KEY = new byte[] { Byte.MAX_VALUE };

  @Parameterized.Parameters
  public static Collection<Object[]> data() {
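    // Force the "multiwal" WAL provider (picked up by TestBackupBase when it sets up the mini
    // cluster) so incremental backups are exercised against multiple WALs per region server.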
    provider = "multiwal";
    List<Object[]> params = new ArrayList<>();
    params.add(new Object[] { Boolean.TRUE });
    return params;
  }

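  // The boolean parameter exists only to satisfy the Parameterized runner; it is unused.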
  public TestIncrementalBackup(Boolean b) {
  }

  @After
  public void ensurePreviousBackupTestsAreCleanedUp() throws Exception {
    TEST_UTIL.flush(table1);
    TEST_UTIL.flush(table2);

    TEST_UTIL.truncateTable(table1).close();
    TEST_UTIL.truncateTable(table2).close();

    if (TEST_UTIL.getAdmin().tableExists(table1_restore)) {
      TEST_UTIL.flush(table1_restore);
      TEST_UTIL.truncateTable(table1_restore).close();
    }

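    // Roll the WALs on every region server so the next test starts against fresh WAL files and
    // its incremental backups are not polluted by edits left over from this test.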
    TEST_UTIL.getMiniHBaseCluster().getRegionServerThreads().forEach(rst -> {
      try {
        LogRoller walRoller = rst.getRegionServer().getWalRoller();
        walRoller.requestRollAll();
        walRoller.waitUntilWalRollFinished();
      } catch (Exception ignored) {
      }
    });

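    // Reload the baseline rows (NB_ROWS_IN_BATCH per table) that the backup tests assume are
    // present in table1 and table2.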
    try (Table table = TEST_UTIL.getConnection().getTable(table1)) {
      loadTable(table);
    }

    try (Table table = TEST_UTIL.getConnection().getTable(table2)) {
      loadTable(table);
    }
  }

  // All test cases are implemented in a single test method because the incremental
  // backup/restore steps depend on one another.
  @Test
  public void TestIncBackupRestore() throws Exception {
    int ADD_ROWS = 99;

    // #1 - create full backup for all tables
    LOG.info("create full backup image for all tables");
    List<TableName> tables = Lists.newArrayList(table1, table2);
    final byte[] fam3Name = Bytes.toBytes("f3");
    final byte[] mobName = Bytes.toBytes("mob");

    TableDescriptor newTable1Desc = TableDescriptorBuilder.newBuilder(table1Desc)
      .setColumnFamily(ColumnFamilyDescriptorBuilder.of(fam3Name))
      .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(mobName).setMobEnabled(true)
        .setMobThreshold(5L).build())
      .build();
    TEST_UTIL.getAdmin().modifyTable(newTable1Desc);

    try (Connection conn = ConnectionFactory.createConnection(conf1)) {
      int NB_ROWS_FAM3 = 6;
      insertIntoTable(conn, table1, fam3Name, 3, NB_ROWS_FAM3).close();
      insertIntoTable(conn, table1, mobName, 3, NB_ROWS_FAM3).close();
      Admin admin = conn.getAdmin();
      BackupAdminImpl client = new BackupAdminImpl(conn);
      String backupIdFull = takeFullBackup(tables, client);
      validateRootPathCanBeOverridden(BACKUP_ROOT_DIR, backupIdFull);
      assertTrue(checkSucceeded(backupIdFull));

      // #2 - insert some data to table
      Table t1 = insertIntoTable(conn, table1, famName, 1, ADD_ROWS);
      LOG.debug("writing " + ADD_ROWS + " rows to " + table1);
      Assert.assertEquals(HBaseTestingUtil.countRows(t1),
        NB_ROWS_IN_BATCH + ADD_ROWS + NB_ROWS_FAM3);
      LOG.debug("written " + ADD_ROWS + " rows to " + table1);
      // additionally, insert rows to MOB cf
      int NB_ROWS_MOB = 111;
      insertIntoTable(conn, table1, mobName, 3, NB_ROWS_MOB);
      LOG.debug("written " + NB_ROWS_MOB + " rows to " + table1 + " to Mob enabled CF");
      Assert.assertEquals(HBaseTestingUtil.countRows(t1),
        NB_ROWS_IN_BATCH + ADD_ROWS + NB_ROWS_MOB);
      t1.close();
      Table t2 = conn.getTable(table2);
      Put p2;
      for (int i = 0; i < 5; i++) {
        p2 = new Put(Bytes.toBytes("row-t2" + i));
        p2.addColumn(famName, qualName, Bytes.toBytes("val" + i));
        t2.put(p2);
      }
      Assert.assertEquals(NB_ROWS_IN_BATCH + 5, HBaseTestingUtil.countRows(t2));
      t2.close();
      LOG.debug("written " + 5 + " rows to " + table2);
      // split table1
      SingleProcessHBaseCluster cluster = TEST_UTIL.getHBaseCluster();
      List<HRegion> regions = cluster.getRegions(table1);
      byte[] name = regions.get(0).getRegionInfo().getRegionName();
      long startSplitTime = EnvironmentEdgeManager.currentTime();
      try {
        admin.splitRegionAsync(name).get();
      } catch (Exception e) {
        // The split may fail (e.g. the region is not splittable) and throw an exception;
        // this does not affect the checks that follow, so just log and continue.
        LOG.debug("region is not splittable, because " + e);
      }
      TEST_UTIL.waitTableAvailable(table1);
      long endSplitTime = EnvironmentEdgeManager.currentTime();
      // split finished
      LOG.debug("split finished in " + (endSplitTime - startSplitTime) + " ms");

      // #3 - incremental backup for multiple tables
      tables = Lists.newArrayList(table1, table2);
      BackupRequest request =
        createBackupRequest(BackupType.INCREMENTAL, tables, BACKUP_ROOT_DIR);
      String backupIdIncMultiple = client.backupTables(request);
      assertTrue(checkSucceeded(backupIdIncMultiple));
      BackupManifest manifest =
        HBackupFileSystem.getManifest(conf1, new Path(BACKUP_ROOT_DIR), backupIdIncMultiple);
      assertEquals(Sets.newHashSet(table1, table2), new HashSet<>(manifest.getTableList()));
      validateRootPathCanBeOverridden(BACKUP_ROOT_DIR, backupIdIncMultiple);

      // add column family f2 to table1
      // drop column family f3
      final byte[] fam2Name = Bytes.toBytes("f2");
      newTable1Desc = TableDescriptorBuilder.newBuilder(newTable1Desc)
        .setColumnFamily(ColumnFamilyDescriptorBuilder.of(fam2Name)).removeColumnFamily(fam3Name)
        .build();
      TEST_UTIL.getAdmin().modifyTable(newTable1Desc);

      // check that an incremental backup fails because the CFs don't match
      final List<TableName> tablesCopy = tables;
      IOException ex = assertThrows(IOException.class, () -> client
        .backupTables(createBackupRequest(BackupType.INCREMENTAL, tablesCopy, BACKUP_ROOT_DIR)));
      checkThrowsCFMismatch(ex, List.of(table1));
      takeFullBackup(tables, client);

      int NB_ROWS_FAM2 = 7;
      Table t3 = insertIntoTable(conn, table1, fam2Name, 2, NB_ROWS_FAM2);
      t3.close();

      // Wait 5 seconds to give the cluster time to delete old WALs
      Thread.sleep(5000);

      // #4 - additional incremental backup for multiple tables
      request = createBackupRequest(BackupType.INCREMENTAL, tables, BACKUP_ROOT_DIR);
      String backupIdIncMultiple2 = client.backupTables(request);
      assertTrue(checkSucceeded(backupIdIncMultiple2));
      validateRootPathCanBeOverridden(BACKUP_ROOT_DIR, backupIdIncMultiple2);

      // #5 - restore full backup for all tables
      TableName[] tablesRestoreFull = new TableName[] { table1, table2 };
      TableName[] tablesMapFull = new TableName[] { table1_restore, table2_restore };

      LOG.debug("Restoring full " + backupIdFull);
      client.restore(BackupUtils.createRestoreRequest(BACKUP_ROOT_DIR, backupIdFull, false,
        tablesRestoreFull, tablesMapFull, true));

      // #6.1 - check tables for full restore
      Admin hAdmin = TEST_UTIL.getAdmin();
      assertTrue(hAdmin.tableExists(table1_restore));
      assertTrue(hAdmin.tableExists(table2_restore));
      hAdmin.close();

      // #6.2 - checking row count of tables for full restore
      Table hTable = conn.getTable(table1_restore);
      Assert.assertEquals(HBaseTestingUtil.countRows(hTable), NB_ROWS_IN_BATCH + NB_ROWS_FAM3);
      hTable.close();

      hTable = conn.getTable(table2_restore);
      Assert.assertEquals(NB_ROWS_IN_BATCH, HBaseTestingUtil.countRows(hTable));
      hTable.close();

      // #7 - restore incremental backup for multiple tables, with overwrite
      TableName[] tablesRestoreIncMultiple = new TableName[] { table1, table2 };
      TableName[] tablesMapIncMultiple = new TableName[] { table1_restore, table2_restore };
      client.restore(BackupUtils.createRestoreRequest(BACKUP_ROOT_DIR, backupIdIncMultiple2, false,
        tablesRestoreIncMultiple, tablesMapIncMultiple, true));
      hTable = conn.getTable(table1_restore);

      LOG.debug("After incremental restore: " + hTable.getDescriptor());
      int countFamName = TEST_UTIL.countRows(hTable, famName);
      LOG.debug("f1 has " + countFamName + " rows");
      Assert.assertEquals(countFamName, NB_ROWS_IN_BATCH + ADD_ROWS);

      int countFam2Name = TEST_UTIL.countRows(hTable, fam2Name);
      LOG.debug("f2 has " + countFam2Name + " rows");
      Assert.assertEquals(countFam2Name, NB_ROWS_FAM2);

      int countMobName = TEST_UTIL.countRows(hTable, mobName);
      LOG.debug("mob has " + countMobName + " rows");
      Assert.assertEquals(countMobName, NB_ROWS_MOB);
      hTable.close();

      hTable = conn.getTable(table2_restore);
      Assert.assertEquals(NB_ROWS_IN_BATCH + 5, HBaseTestingUtil.countRows(hTable));
      hTable.close();
      admin.close();
    }
  }

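  // Verifies incremental backup/restore when the restore keeps the original region boundaries:
  // the backup files must be left untouched by the restore, and row counts must survive region
  // splits and bulkloads performed between incremental backups.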
  @Test
  public void TestIncBackupRestoreWithOriginalSplits() throws Exception {
    byte[] mobFam = Bytes.toBytes("mob");

    List<TableName> tables = Lists.newArrayList(table1);
    TableDescriptor newTable1Desc =
      TableDescriptorBuilder.newBuilder(table1Desc).setColumnFamily(ColumnFamilyDescriptorBuilder
        .newBuilder(mobFam).setMobEnabled(true).setMobThreshold(5L).build()).build();
    TEST_UTIL.getAdmin().modifyTable(newTable1Desc);

    Connection conn = TEST_UTIL.getConnection();
    BackupAdminImpl backupAdmin = new BackupAdminImpl(conn);
    BackupRequest request = createBackupRequest(BackupType.FULL, tables, BACKUP_ROOT_DIR);
    String fullBackupId = backupAdmin.backupTables(request);
    assertTrue(checkSucceeded(fullBackupId));

    TableName[] fromTables = new TableName[] { table1 };
    TableName[] toTables = new TableName[] { table1_restore };

    List<LocatedFileStatus> preRestoreBackupFiles = getBackupFiles();
    backupAdmin.restore(BackupUtils.createRestoreRequest(BACKUP_ROOT_DIR, fullBackupId, false,
      fromTables, toTables, true, true));
    List<LocatedFileStatus> postRestoreBackupFiles = getBackupFiles();

    // Check that the backup files are the same before and after the restore process
    Assert.assertEquals(postRestoreBackupFiles, preRestoreBackupFiles);
    Assert.assertEquals(TEST_UTIL.countRows(table1_restore), NB_ROWS_IN_BATCH);

    int ROWS_TO_ADD = 1_000;
    // different IDs so that rows don't overlap
    insertIntoTable(conn, table1, famName, 3, ROWS_TO_ADD);
    insertIntoTable(conn, table1, mobFam, 4, ROWS_TO_ADD);

    try (Admin admin = conn.getAdmin()) {
      List<HRegion> currentRegions = TEST_UTIL.getHBaseCluster().getRegions(table1);
      for (HRegion region : currentRegions) {
        byte[] name = region.getRegionInfo().getEncodedNameAsBytes();
        admin.splitRegionAsync(name).get();
      }

      TEST_UTIL.waitTableAvailable(table1);

      // Make sure we've split regions
      assertNotEquals(currentRegions, TEST_UTIL.getHBaseCluster().getRegions(table1));

      request = createBackupRequest(BackupType.INCREMENTAL, tables, BACKUP_ROOT_DIR);
      String incrementalBackupId = backupAdmin.backupTables(request);
      assertTrue(checkSucceeded(incrementalBackupId));
      preRestoreBackupFiles = getBackupFiles();
      backupAdmin.restore(BackupUtils.createRestoreRequest(BACKUP_ROOT_DIR, incrementalBackupId,
        false, fromTables, toTables, true, true));
      postRestoreBackupFiles = getBackupFiles();
      Assert.assertEquals(postRestoreBackupFiles, preRestoreBackupFiles);
      Assert.assertEquals(NB_ROWS_IN_BATCH + ROWS_TO_ADD + ROWS_TO_ADD,
        TEST_UTIL.countRows(table1_restore));

      // test bulkloads
      HRegion regionToBulkload = TEST_UTIL.getHBaseCluster().getRegions(table1).get(0);
      String regionName = regionToBulkload.getRegionInfo().getEncodedName();

      insertIntoTable(conn, table1, famName, 5, ROWS_TO_ADD);
      insertIntoTable(conn, table1, mobFam, 6, ROWS_TO_ADD);

      doBulkload(table1, regionName, famName, mobFam);

      // we need to major compact the regions to make sure there are no references
      // and the regions are once again splittable
      TEST_UTIL.compact(true);
      TEST_UTIL.flush();
      TEST_UTIL.waitTableAvailable(table1);

      for (HRegion region : TEST_UTIL.getHBaseCluster().getRegions(table1)) {
        if (region.isSplittable()) {
          admin.splitRegionAsync(region.getRegionInfo().getEncodedNameAsBytes()).get();
        }
      }

      request = createBackupRequest(BackupType.INCREMENTAL, tables, BACKUP_ROOT_DIR);
      incrementalBackupId = backupAdmin.backupTables(request);
      assertTrue(checkSucceeded(incrementalBackupId));

      preRestoreBackupFiles = getBackupFiles();
      backupAdmin.restore(BackupUtils.createRestoreRequest(BACKUP_ROOT_DIR, incrementalBackupId,
        false, fromTables, toTables, true, true));
      postRestoreBackupFiles = getBackupFiles();

      Assert.assertEquals(postRestoreBackupFiles, preRestoreBackupFiles);

      int rowsExpected = TEST_UTIL.countRows(table1);
      int rowsActual = TEST_UTIL.countRows(table1_restore);

      Assert.assertEquals(rowsExpected, rowsActual);
    }
  }

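  // Same scenario as above, but with the backup root placed on a filesystem different from the
  // one HBase runs on, restoring once with the original splits and once with new splits.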
  @Test
  public void TestIncBackupRestoreWithOriginalSplitsSeparateFs() throws Exception {
    String originalBackupRoot = BACKUP_ROOT_DIR;
    // prepare BACKUP_ROOT_DIR on a different filesystem from HBase.
    try (Connection conn = ConnectionFactory.createConnection(conf1);
      BackupAdminImpl admin = new BackupAdminImpl(conn)) {
      String backupTargetDir = TEST_UTIL.getDataTestDir("backupTarget").toString();
      BACKUP_ROOT_DIR = new File(backupTargetDir).toURI().toString();

      List<TableName> tables = Lists.newArrayList(table1);

      insertIntoTable(conn, table1, famName, 3, 100);
      String fullBackupId = takeFullBackup(tables, admin, true);
      assertTrue(checkSucceeded(fullBackupId));

      insertIntoTable(conn, table1, famName, 4, 100);

      HRegion regionToBulkload = TEST_UTIL.getHBaseCluster().getRegions(table1).get(0);
      String regionName = regionToBulkload.getRegionInfo().getEncodedName();
      doBulkload(table1, regionName, famName);

      BackupRequest request =
        createBackupRequest(BackupType.INCREMENTAL, tables, BACKUP_ROOT_DIR, true);
      String incrementalBackupId = admin.backupTables(request);
      assertTrue(checkSucceeded(incrementalBackupId));

      TableName[] fromTable = new TableName[] { table1 };
      TableName[] toTable = new TableName[] { table1_restore };

      // Using original splits
      admin.restore(BackupUtils.createRestoreRequest(BACKUP_ROOT_DIR, incrementalBackupId, false,
        fromTable, toTable, true, true));

      int actualRowCount = TEST_UTIL.countRows(table1_restore);
      int expectedRowCount = TEST_UTIL.countRows(table1);
      assertEquals(expectedRowCount, actualRowCount);

      // Using new splits
      admin.restore(BackupUtils.createRestoreRequest(BACKUP_ROOT_DIR, incrementalBackupId, false,
        fromTable, toTable, true, false));

      expectedRowCount = TEST_UTIL.countRows(table1);
      actualRowCount = TEST_UTIL.countRows(table1_restore);
      assertEquals(expectedRowCount, actualRowCount);
    } finally {
      BACKUP_ROOT_DIR = originalBackupRoot;
    }
  }

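  // Verifies that an incremental backup still succeeds when one of the bulk-loaded HFiles has
  // already been moved to the archive directory by the time the backup runs.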
  @Test
  public void TestIncBackupRestoreHandlesArchivedFiles() throws Exception {
    String originalBackupRoot = BACKUP_ROOT_DIR;
    byte[] fam2 = Bytes.toBytes("f2");
    TableDescriptor newTable1Desc = TableDescriptorBuilder.newBuilder(table1Desc)
      .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(fam2).build()).build();
    TEST_UTIL.getAdmin().modifyTable(newTable1Desc);
    try (Connection conn = ConnectionFactory.createConnection(conf1);
      BackupAdminImpl admin = new BackupAdminImpl(conn)) {
      String backupTargetDir = TEST_UTIL.getDataTestDir("backupTarget").toString();
      BACKUP_ROOT_DIR = new File(backupTargetDir).toURI().toString();

      List<TableName> tables = Lists.newArrayList(table1);

      insertIntoTable(conn, table1, famName, 3, 100);
      String fullBackupId = takeFullBackup(tables, admin, true);
      assertTrue(checkSucceeded(fullBackupId));

      insertIntoTable(conn, table1, famName, 4, 100);

      HRegion regionToBulkload = TEST_UTIL.getHBaseCluster().getRegions(table1).get(0);
      String regionName = regionToBulkload.getRegionInfo().getEncodedName();
      // Requires a multi-family bulkload to ensure we're appropriately handling
      // multi-file bulkloads
      Path regionDir = doBulkload(table1, regionName, famName, fam2);

      // archive the files in the region directory
      Path archiveDir =
        HFileArchiveUtil.getStoreArchivePath(conf1, table1, regionName, Bytes.toString(famName));
      TEST_UTIL.getTestFileSystem().mkdirs(archiveDir);
      RemoteIterator<LocatedFileStatus> iter =
        TEST_UTIL.getTestFileSystem().listFiles(regionDir, true);
      List<Path> paths = new ArrayList<>();
      while (iter.hasNext()) {
        Path path = iter.next().getPath();
        if (path.toString().contains("_SeqId_")) {
          paths.add(path);
        }
      }
      assertTrue(paths.size() > 1);
      Path path = paths.get(0);
      String name = path.toString();
      int startIdx = name.lastIndexOf(Path.SEPARATOR);
      String filename = name.substring(startIdx + 1);
      Path archiveFile = new Path(archiveDir, filename);
      // archive 1 of the files
      boolean success = TEST_UTIL.getTestFileSystem().rename(path, archiveFile);
      assertTrue(success);
      assertTrue(TEST_UTIL.getTestFileSystem().exists(archiveFile));
      assertFalse(TEST_UTIL.getTestFileSystem().exists(path));

      BackupRequest request =
        createBackupRequest(BackupType.INCREMENTAL, tables, BACKUP_ROOT_DIR, true);
      String incrementalBackupId = admin.backupTables(request);
      assertTrue(checkSucceeded(incrementalBackupId));

      TableName[] fromTable = new TableName[] { table1 };
      TableName[] toTable = new TableName[] { table1_restore };

      admin.restore(BackupUtils.createRestoreRequest(BACKUP_ROOT_DIR, incrementalBackupId, false,
        fromTable, toTable, true));

      int actualRowCount = TEST_UTIL.countRows(table1_restore);
      int expectedRowCount = TEST_UTIL.countRows(table1);
      assertEquals(expectedRowCount, actualRowCount);
    } finally {
      BACKUP_ROOT_DIR = originalBackupRoot;
    }
  }

  private void checkThrowsCFMismatch(IOException ex, List<TableName> tables) {
    Throwable cause = Throwables.getRootCause(ex);
    assertEquals(cause.getClass(), ColumnFamilyMismatchException.class);
    ColumnFamilyMismatchException e = (ColumnFamilyMismatchException) cause;
    assertEquals(tables, e.getMismatchedTables());
  }

  private String takeFullBackup(List<TableName> tables, BackupAdminImpl backupAdmin)
    throws IOException {
    return takeFullBackup(tables, backupAdmin, false);
  }

  private String takeFullBackup(List<TableName> tables, BackupAdminImpl backupAdmin,
    boolean noChecksumVerify) throws IOException {
    BackupRequest req =
      createBackupRequest(BackupType.FULL, tables, BACKUP_ROOT_DIR, noChecksumVerify);
    String backupId = backupAdmin.backupTables(req);
    assertTrue(checkSucceeded(backupId));
    return backupId;
  }

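  /**
   * Creates HFiles for the given families directly under the region's directory and bulk-loads
   * them into the table, returning the region directory the files were staged in.
   */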
  private static Path doBulkload(TableName tn, String regionName, byte[]... fams)
    throws IOException {
    Path regionDir = createHFiles(tn, regionName, fams);
    Map<BulkLoadHFiles.LoadQueueItem, ByteBuffer> results =
      BulkLoadHFiles.create(conf1).bulkLoad(tn, regionDir);
    assertFalse(results.isEmpty());
    return regionDir;
  }

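  /**
   * Writes one HFile per column family (1000 rows spanning the bulkload key range) under the
   * region's directory on the test filesystem and returns that region directory.
   */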
  private static Path createHFiles(TableName tn, String regionName, byte[]... fams)
    throws IOException {
    Path rootdir = CommonFSUtils.getRootDir(conf1);
    Path regionDir = CommonFSUtils.getRegionDir(rootdir, tn, regionName);

    FileSystem fs = FileSystem.get(TEST_UTIL.getConfiguration());
    fs.mkdirs(rootdir);

    for (byte[] fam : fams) {
      Path famDir = new Path(regionDir, Bytes.toString(fam));
      Path hFileDir = new Path(famDir, UUID.randomUUID().toString());
      HFileTestUtil.createHFile(conf1, fs, hFileDir, fam, qualName, BULKLOAD_START_KEY,
        BULKLOAD_END_KEY, 1000);
    }

    return regionDir;
  }

  /**
   * Check that backup manifest can be produced for a different root. Users may want to move
   * existing backups to a different location.
   */
  private void validateRootPathCanBeOverridden(String originalPath, String backupId)
    throws IOException {
    String anotherRootDir = "/some/other/root/dir";
    Path anotherPath = new Path(anotherRootDir, backupId);
    BackupManifest.BackupImage differentLocationImage = BackupManifest.hydrateRootDir(
      HBackupFileSystem.getManifest(conf1, new Path(originalPath), backupId).getBackupImage(),
      anotherPath);
    assertEquals(differentLocationImage.getRootDir(), anotherRootDir);
    for (BackupManifest.BackupImage ancestor : differentLocationImage.getAncestors()) {
      assertEquals(anotherRootDir, ancestor.getRootDir());
    }
  }

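  /**
   * Recursively lists every file currently under BACKUP_ROOT_DIR on the test filesystem.
   */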
  private List<LocatedFileStatus> getBackupFiles() throws IOException {
    FileSystem fs = TEST_UTIL.getTestFileSystem();
    RemoteIterator<LocatedFileStatus> iter = fs.listFiles(new Path(BACKUP_ROOT_DIR), true);
    List<LocatedFileStatus> files = new ArrayList<>();

    while (iter.hasNext()) {
      files.add(iter.next());
    }

    return files;
  }
}