/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.backup;

import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertNotEquals;
import static org.junit.Assert.assertThrows;
import static org.junit.Assert.assertTrue;

import java.io.File;
import java.io.IOException;
import java.nio.ByteBuffer;
import java.util.ArrayList;
import java.util.Collection;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.UUID;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.LocatedFileStatus;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.RemoteIterator;
import org.apache.hadoop.hbase.HBaseClassTestRule;
import org.apache.hadoop.hbase.HBaseTestingUtil;
import org.apache.hadoop.hbase.SingleProcessHBaseCluster;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.backup.impl.BackupAdminImpl;
import org.apache.hadoop.hbase.backup.impl.BackupManifest;
import org.apache.hadoop.hbase.backup.impl.ColumnFamilyMismatchException;
import org.apache.hadoop.hbase.backup.util.BackupUtils;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.regionserver.HRegion;
import org.apache.hadoop.hbase.regionserver.LogRoller;
import org.apache.hadoop.hbase.testclassification.LargeTests;
import org.apache.hadoop.hbase.tool.BulkLoadHFiles;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.CommonFSUtils;
import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
import org.apache.hadoop.hbase.util.HFileTestUtil;
import org.junit.After;
import org.junit.Assert;
import org.junit.ClassRule;
import org.junit.Test;
import org.junit.experimental.categories.Category;
import org.junit.runner.RunWith;
import org.junit.runners.Parameterized;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import org.apache.hbase.thirdparty.com.google.common.base.Throwables;
import org.apache.hbase.thirdparty.com.google.common.collect.Lists;
import org.apache.hbase.thirdparty.com.google.common.collect.Sets;

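/**
 * Tests for incremental backup and restore. Exercises a full backup followed by incremental
 * backups and restores, covering MOB column families, column family add/drop, region splits,
 * bulk loads, restore onto original vs. new region splits, and a backup root on a filesystem
 * separate from the HBase root.
 */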
@Category(LargeTests.class)
@RunWith(Parameterized.class)
public class TestIncrementalBackup extends TestBackupBase {

  @ClassRule
  public static final HBaseClassTestRule CLASS_RULE =
    HBaseClassTestRule.forClass(TestIncrementalBackup.class);

  private static final Logger LOG = LoggerFactory.getLogger(TestIncrementalBackup.class);
  private static final byte[] BULKLOAD_START_KEY = new byte[] { 0x00 };
  private static final byte[] BULKLOAD_END_KEY = new byte[] { Byte.MAX_VALUE };

  @Parameterized.Parameters
  public static Collection<Object[]> data() {
    provider = "multiwal";
    List<Object[]> params = new ArrayList<>();
    params.add(new Object[] { Boolean.TRUE });
    return params;
  }

  public TestIncrementalBackup(Boolean b) {
  }

  @After
  public void ensurePreviousBackupTestsAreCleanedUp() throws Exception {
    TEST_UTIL.flush(table1);
    TEST_UTIL.flush(table2);
    TEST_UTIL.flush(table1_restore);

    TEST_UTIL.truncateTable(table1).close();
    TEST_UTIL.truncateTable(table2).close();
    TEST_UTIL.truncateTable(table1_restore).close();

    TEST_UTIL.getMiniHBaseCluster().getRegionServerThreads().forEach(rst -> {
      try {
        LogRoller walRoller = rst.getRegionServer().getWalRoller();
        walRoller.requestRollAll();
        walRoller.waitUntilWalRollFinished();
      } catch (Exception ignored) {
      }
    });

    try (Table table = TEST_UTIL.getConnection().getTable(table1)) {
      loadTable(table);
    }

    try (Table table = TEST_UTIL.getConnection().getTable(table2)) {
      loadTable(table);
    }
  }

  // All scenarios are implemented in a single test method because the incremental
  // backup/restore steps depend on one another.
  @Test
  public void TestIncBackupRestore() throws Exception {
    int ADD_ROWS = 99;

    // #1 - create full backup for all tables
    LOG.info("create full backup image for all tables");
    List<TableName> tables = Lists.newArrayList(table1, table2);
    final byte[] fam3Name = Bytes.toBytes("f3");
    final byte[] mobName = Bytes.toBytes("mob");

    TableDescriptor newTable1Desc = TableDescriptorBuilder.newBuilder(table1Desc)
      .setColumnFamily(ColumnFamilyDescriptorBuilder.of(fam3Name))
      .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(mobName).setMobEnabled(true)
        .setMobThreshold(5L).build())
      .build();
    TEST_UTIL.getAdmin().modifyTable(newTable1Desc);

    try (Connection conn = ConnectionFactory.createConnection(conf1)) {
      int NB_ROWS_FAM3 = 6;
      insertIntoTable(conn, table1, fam3Name, 3, NB_ROWS_FAM3).close();
      insertIntoTable(conn, table1, mobName, 3, NB_ROWS_FAM3).close();
      Admin admin = conn.getAdmin();
      BackupAdminImpl client = new BackupAdminImpl(conn);
      BackupRequest request = createBackupRequest(BackupType.FULL, tables, BACKUP_ROOT_DIR);
      String backupIdFull = takeFullBackup(tables, client);
      validateRootPathCanBeOverridden(BACKUP_ROOT_DIR, backupIdFull);
      assertTrue(checkSucceeded(backupIdFull));

      // #2 - insert some data to table
      Table t1 = insertIntoTable(conn, table1, famName, 1, ADD_ROWS);
      LOG.debug("writing " + ADD_ROWS + " rows to " + table1);
      Assert.assertEquals(HBaseTestingUtil.countRows(t1),
        NB_ROWS_IN_BATCH + ADD_ROWS + NB_ROWS_FAM3);
      LOG.debug("written " + ADD_ROWS + " rows to " + table1);
      // additionally, insert rows to the MOB cf
      int NB_ROWS_MOB = 111;
      insertIntoTable(conn, table1, mobName, 3, NB_ROWS_MOB);
      LOG.debug("written " + NB_ROWS_MOB + " rows to " + table1 + " to MOB enabled CF");
      t1.close();
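      // NB_ROWS_FAM3 no longer appears in the expected count below: the f3 and mob
      // batches above were inserted with the same batch id (3), so the 6 f3 row keys
      // should be a subset of the NB_ROWS_MOB mob row keys (insertIntoTable derives
      // row keys from the batch id), leaving only the mob rows as distinct additions.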
      Assert.assertEquals(HBaseTestingUtil.countRows(t1),
        NB_ROWS_IN_BATCH + ADD_ROWS + NB_ROWS_MOB);
      Table t2 = conn.getTable(table2);
      Put p2;
      for (int i = 0; i < 5; i++) {
        p2 = new Put(Bytes.toBytes("row-t2" + i));
        p2.addColumn(famName, qualName, Bytes.toBytes("val" + i));
        t2.put(p2);
      }
      Assert.assertEquals(NB_ROWS_IN_BATCH + 5, HBaseTestingUtil.countRows(t2));
      t2.close();
      LOG.debug("written " + 5 + " rows to " + table2);
      // split table1
      SingleProcessHBaseCluster cluster = TEST_UTIL.getHBaseCluster();
      List<HRegion> regions = cluster.getRegions(table1);
      byte[] name = regions.get(0).getRegionInfo().getRegionName();
      long startSplitTime = EnvironmentEdgeManager.currentTime();
      try {
        admin.splitRegionAsync(name).get();
      } catch (Exception e) {
        // The split may fail if the region is not splittable; the checks below do not
        // depend on the split succeeding.
        LOG.debug("region is not splittable, because " + e);
      }
      TEST_UTIL.waitTableAvailable(table1);
      long endSplitTime = EnvironmentEdgeManager.currentTime();
      // split finished
      LOG.debug("split finished in =" + (endSplitTime - startSplitTime));

      // #3 - incremental backup for multiple tables
      tables = Lists.newArrayList(table1, table2);
      request = createBackupRequest(BackupType.INCREMENTAL, tables, BACKUP_ROOT_DIR);
      String backupIdIncMultiple = client.backupTables(request);
      assertTrue(checkSucceeded(backupIdIncMultiple));
      BackupManifest manifest =
        HBackupFileSystem.getManifest(conf1, new Path(BACKUP_ROOT_DIR), backupIdIncMultiple);
      assertEquals(Sets.newHashSet(table1, table2), new HashSet<>(manifest.getTableList()));
      validateRootPathCanBeOverridden(BACKUP_ROOT_DIR, backupIdIncMultiple);

      // add column family f2 to table1
      // drop column family f3
      final byte[] fam2Name = Bytes.toBytes("f2");
      newTable1Desc = TableDescriptorBuilder.newBuilder(newTable1Desc)
        .setColumnFamily(ColumnFamilyDescriptorBuilder.of(fam2Name)).removeColumnFamily(fam3Name)
        .build();
      TEST_UTIL.getAdmin().modifyTable(newTable1Desc);

      // check that an incremental backup fails because the CFs don't match
      final List<TableName> tablesCopy = tables;
      IOException ex = assertThrows(IOException.class, () -> client
        .backupTables(createBackupRequest(BackupType.INCREMENTAL, tablesCopy, BACKUP_ROOT_DIR)));
      checkThrowsCFMismatch(ex, List.of(table1));
      takeFullBackup(tables, client);

      int NB_ROWS_FAM2 = 7;
      Table t3 = insertIntoTable(conn, table1, fam2Name, 2, NB_ROWS_FAM2);
      t3.close();

      // Wait for 5 sec to make sure that old WALs were deleted
      Thread.sleep(5000);

      // #4 - additional incremental backup for multiple tables
      request = createBackupRequest(BackupType.INCREMENTAL, tables, BACKUP_ROOT_DIR);
      String backupIdIncMultiple2 = client.backupTables(request);
      assertTrue(checkSucceeded(backupIdIncMultiple2));
      validateRootPathCanBeOverridden(BACKUP_ROOT_DIR, backupIdIncMultiple2);

      // #5 - restore full backup for all tables
      TableName[] tablesRestoreFull = new TableName[] { table1, table2 };
      TableName[] tablesMapFull = new TableName[] { table1_restore, table2_restore };

      LOG.debug("Restoring full " + backupIdFull);
      client.restore(BackupUtils.createRestoreRequest(BACKUP_ROOT_DIR, backupIdFull, false,
        tablesRestoreFull, tablesMapFull, true));

      // #6.1 - check tables for full restore
      Admin hAdmin = TEST_UTIL.getAdmin();
      assertTrue(hAdmin.tableExists(table1_restore));
      assertTrue(hAdmin.tableExists(table2_restore));
      hAdmin.close();

      // #6.2 - checking row count of tables for full restore
      Table hTable = conn.getTable(table1_restore);
      Assert.assertEquals(HBaseTestingUtil.countRows(hTable), NB_ROWS_IN_BATCH + NB_ROWS_FAM3);
      hTable.close();

      hTable = conn.getTable(table2_restore);
      Assert.assertEquals(NB_ROWS_IN_BATCH, HBaseTestingUtil.countRows(hTable));
      hTable.close();

      // #7 - restore incremental backup for multiple tables, with overwrite
      TableName[] tablesRestoreIncMultiple = new TableName[] { table1, table2 };
      TableName[] tablesMapIncMultiple = new TableName[] { table1_restore, table2_restore };
      client.restore(BackupUtils.createRestoreRequest(BACKUP_ROOT_DIR, backupIdIncMultiple2, false,
        tablesRestoreIncMultiple, tablesMapIncMultiple, true));
      hTable = conn.getTable(table1_restore);

      LOG.debug("After incremental restore: " + hTable.getDescriptor());
      int countFamName = TEST_UTIL.countRows(hTable, famName);
      LOG.debug("f1 has " + countFamName + " rows");
      Assert.assertEquals(countFamName, NB_ROWS_IN_BATCH + ADD_ROWS);

      int countFam2Name = TEST_UTIL.countRows(hTable, fam2Name);
      LOG.debug("f2 has " + countFam2Name + " rows");
      Assert.assertEquals(countFam2Name, NB_ROWS_FAM2);

      int countMobName = TEST_UTIL.countRows(hTable, mobName);
      LOG.debug("mob has " + countMobName + " rows");
      Assert.assertEquals(countMobName, NB_ROWS_MOB);
      hTable.close();

      hTable = conn.getTable(table2_restore);
      Assert.assertEquals(NB_ROWS_IN_BATCH + 5, HBaseTestingUtil.countRows(hTable));
      hTable.close();
      admin.close();
    }
  }

  @Test
  public void TestIncBackupRestoreWithOriginalSplits() throws Exception {
    byte[] mobFam = Bytes.toBytes("mob");

    List<TableName> tables = Lists.newArrayList(table1);
    TableDescriptor newTable1Desc =
      TableDescriptorBuilder.newBuilder(table1Desc).setColumnFamily(ColumnFamilyDescriptorBuilder
        .newBuilder(mobFam).setMobEnabled(true).setMobThreshold(5L).build()).build();
    TEST_UTIL.getAdmin().modifyTable(newTable1Desc);

    Connection conn = TEST_UTIL.getConnection();
    BackupAdminImpl backupAdmin = new BackupAdminImpl(conn);
    BackupRequest request = createBackupRequest(BackupType.FULL, tables, BACKUP_ROOT_DIR);
    String fullBackupId = backupAdmin.backupTables(request);
    assertTrue(checkSucceeded(fullBackupId));

    TableName[] fromTables = new TableName[] { table1 };
    TableName[] toTables = new TableName[] { table1_restore };

    List<LocatedFileStatus> preRestoreBackupFiles = getBackupFiles();
    backupAdmin.restore(BackupUtils.createRestoreRequest(BACKUP_ROOT_DIR, fullBackupId, false,
      fromTables, toTables, true, true));
    List<LocatedFileStatus> postRestoreBackupFiles = getBackupFiles();

    // Check that the backup files are the same before and after the restore process
    Assert.assertEquals(postRestoreBackupFiles, preRestoreBackupFiles);
    Assert.assertEquals(TEST_UTIL.countRows(table1_restore), NB_ROWS_IN_BATCH);

    int ROWS_TO_ADD = 1_000;
    // different IDs so that rows don't overlap
    insertIntoTable(conn, table1, famName, 3, ROWS_TO_ADD);
    insertIntoTable(conn, table1, mobFam, 4, ROWS_TO_ADD);

    try (Admin admin = conn.getAdmin()) {
      List<HRegion> currentRegions = TEST_UTIL.getHBaseCluster().getRegions(table1);
      for (HRegion region : currentRegions) {
        byte[] name = region.getRegionInfo().getEncodedNameAsBytes();
        admin.splitRegionAsync(name).get();
      }

      TEST_UTIL.waitTableAvailable(table1);

      // Make sure we've split regions
      assertNotEquals(currentRegions, TEST_UTIL.getHBaseCluster().getRegions(table1));

      request = createBackupRequest(BackupType.INCREMENTAL, tables, BACKUP_ROOT_DIR);
      String incrementalBackupId = backupAdmin.backupTables(request);
      assertTrue(checkSucceeded(incrementalBackupId));
      preRestoreBackupFiles = getBackupFiles();
      backupAdmin.restore(BackupUtils.createRestoreRequest(BACKUP_ROOT_DIR, incrementalBackupId,
        false, fromTables, toTables, true, true));
      postRestoreBackupFiles = getBackupFiles();
      Assert.assertEquals(postRestoreBackupFiles, preRestoreBackupFiles);
      Assert.assertEquals(NB_ROWS_IN_BATCH + ROWS_TO_ADD + ROWS_TO_ADD,
        TEST_UTIL.countRows(table1_restore));

      // test bulkloads
      HRegion regionToBulkload = TEST_UTIL.getHBaseCluster().getRegions(table1).get(0);
      String regionName = regionToBulkload.getRegionInfo().getEncodedName();

      insertIntoTable(conn, table1, famName, 5, ROWS_TO_ADD);
      insertIntoTable(conn, table1, mobFam, 6, ROWS_TO_ADD);

      doBulkload(table1, regionName, famName, mobFam);

      // we need to major compact the regions to make sure there are no references
      // and the regions are once again splittable
      TEST_UTIL.compact(true);
      TEST_UTIL.flush();
      TEST_UTIL.waitTableAvailable(table1);

      for (HRegion region : TEST_UTIL.getHBaseCluster().getRegions(table1)) {
        if (region.isSplittable()) {
          admin.splitRegionAsync(region.getRegionInfo().getEncodedNameAsBytes()).get();
        }
      }

      request = createBackupRequest(BackupType.INCREMENTAL, tables, BACKUP_ROOT_DIR);
      incrementalBackupId = backupAdmin.backupTables(request);
      assertTrue(checkSucceeded(incrementalBackupId));

      preRestoreBackupFiles = getBackupFiles();
      backupAdmin.restore(BackupUtils.createRestoreRequest(BACKUP_ROOT_DIR, incrementalBackupId,
        false, fromTables, toTables, true, true));
      postRestoreBackupFiles = getBackupFiles();

      Assert.assertEquals(postRestoreBackupFiles, preRestoreBackupFiles);

      int rowsExpected = TEST_UTIL.countRows(table1);
      int rowsActual = TEST_UTIL.countRows(table1_restore);

      Assert.assertEquals(rowsExpected, rowsActual);
    }
  }

  @Test
  public void TestIncBackupRestoreWithOriginalSplitsSeparateFs() throws Exception {
    String originalBackupRoot = BACKUP_ROOT_DIR;
    // prepare BACKUP_ROOT_DIR on a different filesystem from HBase.
    try (Connection conn = ConnectionFactory.createConnection(conf1);
      BackupAdminImpl admin = new BackupAdminImpl(conn)) {
      String backupTargetDir = TEST_UTIL.getDataTestDir("backupTarget").toString();
      BACKUP_ROOT_DIR = new File(backupTargetDir).toURI().toString();

      List<TableName> tables = Lists.newArrayList(table1);

      insertIntoTable(conn, table1, famName, 3, 100);
      String fullBackupId = takeFullBackup(tables, admin, true);
      assertTrue(checkSucceeded(fullBackupId));

      insertIntoTable(conn, table1, famName, 4, 100);

      HRegion regionToBulkload = TEST_UTIL.getHBaseCluster().getRegions(table1).get(0);
      String regionName = regionToBulkload.getRegionInfo().getEncodedName();
      doBulkload(table1, regionName, famName);

      BackupRequest request =
        createBackupRequest(BackupType.INCREMENTAL, tables, BACKUP_ROOT_DIR, true);
      String incrementalBackupId = admin.backupTables(request);
      assertTrue(checkSucceeded(incrementalBackupId));

      TableName[] fromTable = new TableName[] { table1 };
      TableName[] toTable = new TableName[] { table1_restore };

      // Using original splits
      admin.restore(BackupUtils.createRestoreRequest(BACKUP_ROOT_DIR, incrementalBackupId, false,
        fromTable, toTable, true, true));

      int actualRowCount = TEST_UTIL.countRows(table1_restore);
      int expectedRowCount = TEST_UTIL.countRows(table1);
      assertEquals(expectedRowCount, actualRowCount);

      // Using new splits
      admin.restore(BackupUtils.createRestoreRequest(BACKUP_ROOT_DIR, incrementalBackupId, false,
        fromTable, toTable, true, false));

      // re-count the restored table after the second restore
      actualRowCount = TEST_UTIL.countRows(table1_restore);
      expectedRowCount = TEST_UTIL.countRows(table1);
      assertEquals(expectedRowCount, actualRowCount);
    } finally {
      BACKUP_ROOT_DIR = originalBackupRoot;
    }
  }

  private void checkThrowsCFMismatch(IOException ex, List<TableName> tables) {
    Throwable cause = Throwables.getRootCause(ex);
    assertEquals(cause.getClass(), ColumnFamilyMismatchException.class);
    ColumnFamilyMismatchException e = (ColumnFamilyMismatchException) cause;
    assertEquals(tables, e.getMismatchedTables());
  }

  private String takeFullBackup(List<TableName> tables, BackupAdminImpl backupAdmin)
    throws IOException {
    return takeFullBackup(tables, backupAdmin, false);
  }

  private String takeFullBackup(List<TableName> tables, BackupAdminImpl backupAdmin,
    boolean noChecksumVerify) throws IOException {
    BackupRequest req =
      createBackupRequest(BackupType.FULL, tables, BACKUP_ROOT_DIR, noChecksumVerify);
    String backupId = backupAdmin.backupTables(req);
    assertTrue(checkSucceeded(backupId));
    return backupId;
  }

  private static void doBulkload(TableName tn, String regionName, byte[]... fams)
    throws IOException {
    Path regionDir = createHFiles(tn, regionName, fams);
    Map<BulkLoadHFiles.LoadQueueItem, ByteBuffer> results =
      BulkLoadHFiles.create(conf1).bulkLoad(tn, regionDir);
    assertFalse(results.isEmpty());
  }

  private static Path createHFiles(TableName tn, String regionName, byte[]... fams)
    throws IOException {
    Path rootdir = CommonFSUtils.getRootDir(conf1);
    Path regionDir = CommonFSUtils.getRegionDir(rootdir, tn, regionName);

    FileSystem fs = FileSystem.get(TEST_UTIL.getConfiguration());
    fs.mkdirs(rootdir);

    for (byte[] fam : fams) {
      Path famDir = new Path(regionDir, Bytes.toString(fam));
      Path hFileDir = new Path(famDir, UUID.randomUUID().toString());
      HFileTestUtil.createHFile(conf1, fs, hFileDir, fam, qualName, BULKLOAD_START_KEY,
        BULKLOAD_END_KEY, 1000);
    }

    return regionDir;
  }

  /**
   * Check that a backup manifest can be produced for a different root. Users may want to move
   * existing backups to a different location.
   */
  private void validateRootPathCanBeOverridden(String originalPath, String backupId)
    throws IOException {
    String anotherRootDir = "/some/other/root/dir";
    Path anotherPath = new Path(anotherRootDir, backupId);
    BackupManifest.BackupImage differentLocationImage = BackupManifest.hydrateRootDir(
      HBackupFileSystem.getManifest(conf1, new Path(originalPath), backupId).getBackupImage(),
      anotherPath);
    assertEquals(differentLocationImage.getRootDir(), anotherRootDir);
    for (BackupManifest.BackupImage ancestor : differentLocationImage.getAncestors()) {
      assertEquals(anotherRootDir, ancestor.getRootDir());
    }
  }

  private List<LocatedFileStatus> getBackupFiles() throws IOException {
    FileSystem fs = TEST_UTIL.getTestFileSystem();
    RemoteIterator<LocatedFileStatus> iter = fs.listFiles(new Path(BACKUP_ROOT_DIR), true);
    List<LocatedFileStatus> files = new ArrayList<>();

    while (iter.hasNext()) {
      files.add(iter.next());
    }

    return files;
  }
}