/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.snapshot;

import static org.apache.hadoop.util.ToolRunner.run;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertTrue;

import java.io.IOException;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Objects;
import java.util.Optional;
import java.util.Set;
import java.util.stream.Collectors;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseClassTestRule;
import org.apache.hadoop.hbase.HBaseTestingUtil;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.RegionInfo;
import org.apache.hadoop.hbase.client.SnapshotType;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.master.snapshot.SnapshotManager;
import org.apache.hadoop.hbase.regionserver.StoreFileInfo;
import org.apache.hadoop.hbase.testclassification.LargeTests;
import org.apache.hadoop.hbase.testclassification.VerySlowMapReduceTests;
import org.apache.hadoop.hbase.tool.BulkLoadHFilesTool;
import org.apache.hadoop.hbase.util.AbstractHBaseTool;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.CommonFSUtils;
import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
import org.apache.hadoop.hbase.util.HFileTestUtil;
import org.apache.hadoop.hbase.util.Pair;
import org.junit.After;
import org.junit.AfterClass;
import org.junit.Before;
import org.junit.BeforeClass;
import org.junit.ClassRule;
import org.junit.Rule;
import org.junit.Test;
import org.junit.experimental.categories.Category;
import org.junit.rules.TestName;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import org.apache.hbase.thirdparty.com.google.common.collect.Lists;

import org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos.SnapshotDescription;
import org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos.SnapshotRegionManifest;

/**
 * Test Export Snapshot Tool
 */
@Category({ VerySlowMapReduceTests.class, LargeTests.class })
public class TestExportSnapshot {

  @ClassRule
  public static final HBaseClassTestRule CLASS_RULE =
    HBaseClassTestRule.forClass(TestExportSnapshot.class);

  private static final Logger LOG = LoggerFactory.getLogger(TestExportSnapshot.class);

  protected final static HBaseTestingUtil TEST_UTIL = new HBaseTestingUtil();

  protected final static byte[] FAMILY = Bytes.toBytes("cf");

  @Rule
  public final TestName testName = new TestName();

  protected TableName tableName;
  private String emptySnapshotName;
  private String snapshotName;
  private int tableNumFiles;
  private Admin admin;

  public static void setUpBaseConf(Configuration conf) {
    conf.setBoolean(SnapshotManager.HBASE_SNAPSHOT_ENABLED, true);
    conf.setInt("hbase.regionserver.msginterval", 100);
    // If a single node has enough failures (default 3), the resource manager will blacklist it.
    // With only 2 nodes and tests injecting faults, we don't want that.
    conf.setInt("mapreduce.job.maxtaskfailures.per.tracker", 100);
  }

  @BeforeClass
  public static void setUpBeforeClass() throws Exception {
    setUpBaseConf(TEST_UTIL.getConfiguration());
    TEST_UTIL.startMiniCluster(1);
    TEST_UTIL.startMiniMapReduceCluster();
  }

  @AfterClass
  public static void tearDownAfterClass() throws Exception {
    TEST_UTIL.shutdownMiniMapReduceCluster();
    TEST_UTIL.shutdownMiniCluster();
  }

  /**
   * Create a table and take a snapshot of the table used by the export test.
   */
  @Before
  public void setUp() throws Exception {
    this.admin = TEST_UTIL.getAdmin();

    tableName = TableName.valueOf("testtb-" + testName.getMethodName());
    snapshotName = "snaptb0-" + testName.getMethodName();
    emptySnapshotName = "emptySnaptb0-" + testName.getMethodName();

    // create Table
    createTable(this.tableName);

    // Take an empty snapshot
    admin.snapshot(emptySnapshotName, tableName);

    // Add some rows
    SnapshotTestingUtils.loadData(TEST_UTIL, tableName, 50, FAMILY);
    tableNumFiles = admin.getRegions(tableName).size();

    // take a snapshot
    admin.snapshot(snapshotName, tableName);
  }

  protected void createTable(TableName tableName) throws Exception {
    SnapshotTestingUtils.createPreSplitTable(TEST_UTIL, tableName, 2, FAMILY);
  }

  protected interface RegionPredicate {
    boolean evaluate(final RegionInfo regionInfo);
  }

  protected RegionPredicate getBypassRegionPredicate() {
    return null;
  }

  @After
  public void tearDown() throws Exception {
    TEST_UTIL.deleteTable(tableName);
    SnapshotTestingUtils.deleteAllSnapshots(TEST_UTIL.getAdmin());
    SnapshotTestingUtils.deleteArchiveDirectory(TEST_UTIL);
  }

  /**
   * Verify that the exported snapshot and the copied files match the original ones.
   */
  @Test
  public void testExportFileSystemState() throws Exception {
    testExportFileSystemState(tableName, snapshotName, snapshotName, tableNumFiles);
  }

  @Test
  public void testExportFileSystemStateWithMergeRegion() throws Exception {
    // disable compaction
    admin.compactionSwitch(false,
      admin.getRegionServers().stream().map(a -> a.getServerName()).collect(Collectors.toList()));
    // create Table
    TableName tableName0 = TableName.valueOf("testtb-" + testName.getMethodName() + "-1");
    String snapshotName0 = "snaptb0-" + testName.getMethodName() + "-1";
    admin.createTable(
      TableDescriptorBuilder.newBuilder(tableName0)
        .setColumnFamilies(
          Lists.newArrayList(ColumnFamilyDescriptorBuilder.newBuilder(FAMILY).build()))
        .build(),
      new byte[][] { Bytes.toBytes("2") });
    // put some data
    try (Table table = admin.getConnection().getTable(tableName0)) {
      table.put(new Put(Bytes.toBytes("1")).addColumn(FAMILY, null, Bytes.toBytes("1")));
      table.put(new Put(Bytes.toBytes("2")).addColumn(FAMILY, null, Bytes.toBytes("2")));
    }
    List<RegionInfo> regions = admin.getRegions(tableName0);
    assertEquals(2, regions.size());
    tableNumFiles = regions.size();
    // merge region
    admin.mergeRegionsAsync(new byte[][] { regions.get(0).getEncodedNameAsBytes(),
      regions.get(1).getEncodedNameAsBytes() }, true).get();
    // take a snapshot
    admin.snapshot(snapshotName0, tableName0);
    // export snapshot and verify
    testExportFileSystemState(tableName0, snapshotName0, snapshotName0, tableNumFiles);
    // delete table
    TEST_UTIL.deleteTable(tableName0);
  }

  @Test
  public void testExportFileSystemStateWithSplitRegion() throws Exception {
    // disable compaction
    admin.compactionSwitch(false,
      admin.getRegionServers().stream().map(a -> a.getServerName()).collect(Collectors.toList()));
    // create Table
    TableName splitTableName = TableName.valueOf(testName.getMethodName());
    String splitTableSnap = "snapshot-" + testName.getMethodName();
    admin.createTable(TableDescriptorBuilder.newBuilder(splitTableName).setColumnFamilies(
      Lists.newArrayList(ColumnFamilyDescriptorBuilder.newBuilder(FAMILY).build())).build());

    Path output = TEST_UTIL.getDataTestDir("output/cf");
    TEST_UTIL.getTestFileSystem().mkdirs(output);
    // Create and bulk load a large hfile so that the export MR job runs long enough.
    HFileTestUtil.createHFile(TEST_UTIL.getConfiguration(), TEST_UTIL.getTestFileSystem(),
      new Path(output, "test_file"), FAMILY, Bytes.toBytes("q"), Bytes.toBytes("1"),
      Bytes.toBytes("9"), 9999999);
    BulkLoadHFilesTool tool = new BulkLoadHFilesTool(TEST_UTIL.getConfiguration());
    tool.run(new String[] { output.getParent().toString(), splitTableName.getNameAsString() });

    List<RegionInfo> regions = admin.getRegions(splitTableName);
    assertEquals(1, regions.size());
    tableNumFiles = regions.size();

    // split region
    admin.split(splitTableName, Bytes.toBytes("5"));
    regions = admin.getRegions(splitTableName);
    assertEquals(2, regions.size());

    // take a snapshot
    admin.snapshot(splitTableSnap, splitTableName);
    // export snapshot and verify
    Configuration tmpConf = TEST_UTIL.getConfiguration();
    // Decrease the copier buffer size so that the export tasks do not finish too quickly.
    tmpConf.setInt("snapshot.export.buffer.size", 1);
    // Decrease the maximum number of files per mapper so that the three files (1 hfile + 2
    // reference files) are copied by different mappers concurrently.
    tmpConf.setInt("snapshot.export.default.map.group", 1);
    testExportFileSystemState(tmpConf, splitTableName, splitTableSnap, splitTableSnap,
      tableNumFiles, TEST_UTIL.getDefaultRootDirPath(), getHdfsDestinationDir(), false, false,
      getBypassRegionPredicate(), true, false);
    // delete table
    TEST_UTIL.deleteTable(splitTableName);
  }

  @Test
  public void testExportFileSystemStateWithSkipTmp() throws Exception {
    TEST_UTIL.getConfiguration().setBoolean(ExportSnapshot.CONF_SKIP_TMP, true);
    try {
      testExportFileSystemState(tableName, snapshotName, snapshotName, tableNumFiles);
    } finally {
      TEST_UTIL.getConfiguration().setBoolean(ExportSnapshot.CONF_SKIP_TMP, false);
    }
  }

  @Test
  public void testEmptyExportFileSystemState() throws Exception {
    testExportFileSystemState(tableName, emptySnapshotName, emptySnapshotName, 0);
  }

  @Test
  public void testConsecutiveExports() throws Exception {
    Path copyDir = getLocalDestinationDir(TEST_UTIL);
    testExportFileSystemState(tableName, snapshotName, snapshotName, tableNumFiles, copyDir, false);
    testExportFileSystemState(tableName, snapshotName, snapshotName, tableNumFiles, copyDir, true);
    removeExportDir(copyDir);
  }

  @Test
  public void testExportWithChecksum() throws Exception {
    // Test different schemes: the input scheme is hdfs:// and the output scheme is file://
    // The checksum verification will fail
    Path copyLocalDir = getLocalDestinationDir(TEST_UTIL);
    testExportFileSystemState(TEST_UTIL.getConfiguration(), tableName, snapshotName, snapshotName,
      tableNumFiles, TEST_UTIL.getDefaultRootDirPath(), copyLocalDir, false, false,
      getBypassRegionPredicate(), false, true);

    // Test same schemes: both the input and output schemes are hdfs://
    // The checksum verification will succeed
    Path copyHdfsDir = getHdfsDestinationDir();
    testExportFileSystemState(TEST_UTIL.getConfiguration(), tableName, snapshotName, snapshotName,
      tableNumFiles, TEST_UTIL.getDefaultRootDirPath(), copyHdfsDir, false, false,
      getBypassRegionPredicate(), true, true);
  }

  @Test
  public void testExportWithTargetName() throws Exception {
    final String targetName = "testExportWithTargetName";
    testExportFileSystemState(tableName, snapshotName, targetName, tableNumFiles);
  }
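
  // Note on the TTL tests below: the snapshot TTL is supplied through the "TTL" entry of the
  // properties map handed to Admin#snapshot(). When the export is run with resetTtl=true (which
  // adds --reset-ttl, see runExportSnapshot), verifySnapshot() expects the exported snapshot
  // descriptor to carry HConstants.DEFAULT_SNAPSHOT_TTL again.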
  @Test
  public void testExportWithResetTtl() throws Exception {
    String name = "testExportWithResetTtl";
    TableName tableName = TableName.valueOf(name);
    String snapshotName = "snaptb-" + name;
    Long ttl = 100000L;

    try {
      // create Table
      createTable(tableName);
      SnapshotTestingUtils.loadData(TEST_UTIL, tableName, 50, FAMILY);
      int tableNumFiles = admin.getRegions(tableName).size();
      // take a snapshot with TTL
      Map<String, Object> props = new HashMap<>();
      props.put("TTL", ttl);
      admin.snapshot(snapshotName, tableName, props);
      Optional<Long> ttlOpt =
        admin.listSnapshots().stream().filter(s -> s.getName().equals(snapshotName))
          .map(org.apache.hadoop.hbase.client.SnapshotDescription::getTtl).findAny();
      assertTrue(ttlOpt.isPresent());
      assertEquals(ttl, ttlOpt.get());

      testExportFileSystemState(tableName, snapshotName, snapshotName, tableNumFiles,
        getHdfsDestinationDir(), false, true);
    } catch (Exception e) {
      throw e;
    } finally {
      TEST_UTIL.deleteTable(tableName);
    }
  }

  @Test
  public void testExportExpiredSnapshot() throws Exception {
    String name = "testExportExpiredSnapshot";
    TableName tableName = TableName.valueOf(name);
    String snapshotName = "snapshot-" + name;
    createTable(tableName);
    SnapshotTestingUtils.loadData(TEST_UTIL, tableName, 50, FAMILY);
    Map<String, Object> properties = new HashMap<>();
    properties.put("TTL", 10);
    org.apache.hadoop.hbase.client.SnapshotDescription snapshotDescription =
      new org.apache.hadoop.hbase.client.SnapshotDescription(snapshotName, tableName,
        SnapshotType.FLUSH, null, EnvironmentEdgeManager.currentTime(), -1, properties);
    admin.snapshot(snapshotDescription);
    boolean isExist =
      admin.listSnapshots().stream().anyMatch(ele -> snapshotName.equals(ele.getName()));
    assertTrue(isExist);
    int retry = 6;
    while (
      !SnapshotDescriptionUtils.isExpiredSnapshot(snapshotDescription.getTtl(),
        snapshotDescription.getCreationTime(), EnvironmentEdgeManager.currentTime()) && retry > 0
    ) {
      retry--;
      Thread.sleep(10 * 1000);
    }
    boolean isExpiredSnapshot =
      SnapshotDescriptionUtils.isExpiredSnapshot(snapshotDescription.getTtl(),
        snapshotDescription.getCreationTime(), EnvironmentEdgeManager.currentTime());
    assertTrue(isExpiredSnapshot);
    int res = runExportSnapshot(TEST_UTIL.getConfiguration(), snapshotName, snapshotName,
      TEST_UTIL.getDefaultRootDirPath(), getHdfsDestinationDir(), false, false, false, true, true);
    assertTrue(res == AbstractHBaseTool.EXIT_FAILURE);
  }

  private void testExportFileSystemState(final TableName tableName, final String snapshotName,
    final String targetName, int filesExpected) throws Exception {
    testExportFileSystemState(tableName, snapshotName, targetName, filesExpected,
      getHdfsDestinationDir(), false);
  }

  protected void testExportFileSystemState(final TableName tableName, final String snapshotName,
    final String targetName, int filesExpected, Path copyDir, boolean overwrite) throws Exception {
    testExportFileSystemState(tableName, snapshotName, targetName, filesExpected, copyDir,
      overwrite, false);
  }

  protected void testExportFileSystemState(final TableName tableName, final String snapshotName,
    final String targetName, int filesExpected, Path copyDir, boolean overwrite, boolean resetTtl)
    throws Exception {
    testExportFileSystemState(TEST_UTIL.getConfiguration(), tableName, snapshotName, targetName,
      filesExpected, TEST_UTIL.getDefaultRootDirPath(), copyDir, overwrite, resetTtl,
      getBypassRegionPredicate(), true, false);
  }

  /**
   * Creates the destination directory, runs the ExportSnapshot() tool, and runs some
   * verifications.
   */
  protected static void testExportFileSystemState(final Configuration conf,
    final TableName tableName, final String snapshotName, final String targetName,
    final int filesExpected, final Path srcDir, Path rawTgtDir, final boolean overwrite,
    final boolean resetTtl, final RegionPredicate bypassregionPredicate, final boolean success,
    final boolean checksumVerify) throws Exception {
    FileSystem tgtFs = rawTgtDir.getFileSystem(conf);
    FileSystem srcFs = srcDir.getFileSystem(conf);
    Path tgtDir = rawTgtDir.makeQualified(tgtFs.getUri(), tgtFs.getWorkingDirectory());

    // Export Snapshot
    int res = runExportSnapshot(conf, snapshotName, targetName, srcDir, rawTgtDir, overwrite,
      resetTtl, checksumVerify, true, true);
    assertEquals("success " + success + ", res=" + res, success ? 0 : 1, res);
    if (!success) {
      final Path targetDir = new Path(HConstants.SNAPSHOT_DIR_NAME, targetName);
      assertFalse(tgtDir.toString() + " " + targetDir.toString(),
        tgtFs.exists(new Path(tgtDir, targetDir)));
      return;
    }
    LOG.info("Exported snapshot");

    // Verify File-System state
    FileStatus[] rootFiles = tgtFs.listStatus(tgtDir);
    assertEquals(filesExpected > 0 ? 2 : 1, rootFiles.length);
    for (FileStatus fileStatus : rootFiles) {
      String name = fileStatus.getPath().getName();
      assertTrue(fileStatus.toString(), fileStatus.isDirectory());
      assertTrue(name.toString(), name.equals(HConstants.SNAPSHOT_DIR_NAME)
        || name.equals(HConstants.HFILE_ARCHIVE_DIRECTORY));
    }
    LOG.info("Verified filesystem state");

    // Compare the snapshot metadata and verify the hfiles
    final Path snapshotDir = new Path(HConstants.SNAPSHOT_DIR_NAME, snapshotName);
    final Path targetDir = new Path(HConstants.SNAPSHOT_DIR_NAME, targetName);
    verifySnapshotDir(srcFs, new Path(srcDir, snapshotDir), tgtFs, new Path(tgtDir, targetDir));
    Set<String> snapshotFiles =
      verifySnapshot(conf, tgtFs, tgtDir, tableName, targetName, resetTtl, bypassregionPredicate);
    assertEquals(filesExpected, snapshotFiles.size());
  }

  /*
   * Verify that the snapshot folder on file-system 1 matches the one on file-system 2.
   */
  protected static void verifySnapshotDir(final FileSystem fs1, final Path root1,
    final FileSystem fs2, final Path root2) throws IOException {
    assertEquals(listFiles(fs1, root1, root1), listFiles(fs2, root2, root2));
  }
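
  // For reference (inferred from the paths constructed in verifySnapshot below): snapshot
  // metadata is expected under <root>/HConstants.SNAPSHOT_DIR_NAME/<snapshot-name>, while the
  // referenced hfiles are expected under <root>/HConstants.HFILE_ARCHIVE_DIRECTORY in the usual
  // <table-dir>/<region>/<family>/<hfile> layout.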
  /*
   * Verify that the referenced files exist.
   */
  protected static Set<String> verifySnapshot(final Configuration conf, final FileSystem fs,
    final Path rootDir, final TableName tableName, final String snapshotName,
    final boolean resetTtl, final RegionPredicate bypassregionPredicate) throws IOException {
    final Path exportedSnapshot =
      new Path(rootDir, new Path(HConstants.SNAPSHOT_DIR_NAME, snapshotName));
    final Set<String> snapshotFiles = new HashSet<>();
    final Path exportedArchive = new Path(rootDir, HConstants.HFILE_ARCHIVE_DIRECTORY);
    SnapshotReferenceUtil.visitReferencedFiles(conf, fs, exportedSnapshot,
      new SnapshotReferenceUtil.SnapshotVisitor() {
        @Override
        public void storeFile(final RegionInfo regionInfo, final String family,
          final SnapshotRegionManifest.StoreFile storeFile) throws IOException {
          if (bypassregionPredicate != null && bypassregionPredicate.evaluate(regionInfo)) {
            return;
          }

          if (!storeFile.hasReference() && !StoreFileInfo.isReference(storeFile.getName())) {
            String hfile = storeFile.getName();
            snapshotFiles.add(hfile);
            verifyNonEmptyFile(new Path(exportedArchive,
              new Path(CommonFSUtils.getTableDir(new Path("./"), tableName),
                new Path(regionInfo.getEncodedName(), new Path(family, hfile)))));
          } else {
            Pair<String, String> referredToRegionAndFile =
              StoreFileInfo.getReferredToRegionAndFile(storeFile.getName());
            String region = referredToRegionAndFile.getFirst();
            String hfile = referredToRegionAndFile.getSecond();
            snapshotFiles.add(hfile);
            verifyNonEmptyFile(new Path(exportedArchive,
              new Path(CommonFSUtils.getTableDir(new Path("./"), tableName),
                new Path(region, new Path(family, hfile)))));
          }
        }

        private void verifyNonEmptyFile(final Path path) throws IOException {
          assertTrue(path + " should exist", fs.exists(path));
          assertTrue(path + " should not be empty", fs.getFileStatus(path).getLen() > 0);
        }
      });

    // Verify Snapshot description
    SnapshotDescription desc = SnapshotDescriptionUtils.readSnapshotInfo(fs, exportedSnapshot);
    assertTrue(desc.getName().equals(snapshotName));
    assertTrue(desc.getTable().equals(tableName.getNameAsString()));
    if (resetTtl) {
      assertEquals(HConstants.DEFAULT_SNAPSHOT_TTL, desc.getTtl());
    }
    return snapshotFiles;
  }

  private static Set<String> listFiles(final FileSystem fs, final Path root, final Path dir)
    throws IOException {
    Set<String> files = new HashSet<>();
    LOG.debug("List files in {} in root {} at {}", fs, root, dir);
    int rootPrefix = root.makeQualified(fs.getUri(), fs.getWorkingDirectory()).toString().length();
    FileStatus[] list = CommonFSUtils.listStatus(fs, dir);
    if (list != null) {
      for (FileStatus fstat : list) {
        LOG.debug(Objects.toString(fstat.getPath()));
        if (fstat.isDirectory()) {
          files.addAll(listFiles(fs, root, fstat.getPath()));
        } else {
          files.add(fstat.getPath().makeQualified(fs).toString().substring(rootPrefix));
        }
      }
    }
    return files;
  }

  private Path getHdfsDestinationDir() {
    Path rootDir = TEST_UTIL.getHBaseCluster().getMaster().getMasterFileSystem().getRootDir();
    Path path =
      new Path(new Path(rootDir, "export-test"), "export-" + EnvironmentEdgeManager.currentTime());
    LOG.info("HDFS export destination path: " + path);
    return path;
  }

  static Path getLocalDestinationDir(HBaseTestingUtil htu) {
    Path path = htu.getDataTestDir("local-export-" + EnvironmentEdgeManager.currentTime());
    try {
      FileSystem fs = FileSystem.getLocal(htu.getConfiguration());
      LOG.info("Local export destination path: " + path);
      return path.makeQualified(fs.getUri(), fs.getWorkingDirectory());
    } catch (IOException ioe) {
      throw new RuntimeException(ioe);
    }
  }

  private static void removeExportDir(final Path path) throws IOException {
    FileSystem fs = FileSystem.get(path.toUri(), new Configuration());
    fs.delete(path, true);
  }
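
  // Builds the ExportSnapshot arguments and runs the tool through ToolRunner. Roughly the same
  // options could be passed on the command line, e.g. (sketch, options as assembled below):
  //   hbase org.apache.hadoop.hbase.snapshot.ExportSnapshot --snapshot <name> --copy-to <dir> \
  //     [--target <new-name>] [--overwrite] [--reset-ttl] [--no-checksum-verify] \
  //     [--no-source-verify] [--no-target-verify]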
  private static int runExportSnapshot(final Configuration conf, final String sourceSnapshotName,
    final String targetSnapshotName, final Path srcDir, Path rawTgtDir, final boolean overwrite,
    final boolean resetTtl, final boolean checksumVerify, final boolean noSourceVerify,
    final boolean noTargetVerify) throws Exception {
    FileSystem tgtFs = rawTgtDir.getFileSystem(conf);
    FileSystem srcFs = srcDir.getFileSystem(conf);
    Path tgtDir = rawTgtDir.makeQualified(tgtFs.getUri(), tgtFs.getWorkingDirectory());
    LOG.info("tgtFsUri={}, tgtDir={}, rawTgtDir={}, srcFsUri={}, srcDir={}", tgtFs.getUri(), tgtDir,
      rawTgtDir, srcFs.getUri(), srcDir);
    List<String> opts = new ArrayList<>();
    opts.add("--snapshot");
    opts.add(sourceSnapshotName);
    opts.add("--copy-to");
    opts.add(tgtDir.toString());
    if (!targetSnapshotName.equals(sourceSnapshotName)) {
      opts.add("--target");
      opts.add(targetSnapshotName);
    }
    if (overwrite) {
      opts.add("--overwrite");
    }
    if (resetTtl) {
      opts.add("--reset-ttl");
    }
    if (!checksumVerify) {
      opts.add("--no-checksum-verify");
    }
    if (!noSourceVerify) {
      opts.add("--no-source-verify");
    }
    if (!noTargetVerify) {
      opts.add("--no-target-verify");
    }

    // Export Snapshot
    return run(conf, new ExportSnapshot(), opts.toArray(new String[opts.size()]));
  }
}