/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.snapshot;

import static org.apache.hadoop.util.ToolRunner.run;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertTrue;

import java.io.IOException;
import java.net.URI;
import java.util.ArrayList;
import java.util.HashSet;
import java.util.List;
import java.util.Objects;
import java.util.Set;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseClassTestRule;
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.RegionInfo;
import org.apache.hadoop.hbase.master.snapshot.SnapshotManager;
import org.apache.hadoop.hbase.testclassification.LargeTests;
import org.apache.hadoop.hbase.testclassification.VerySlowMapReduceTests;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.FSUtils;
import org.junit.After;
import org.junit.AfterClass;
import org.junit.Before;
import org.junit.BeforeClass;
import org.junit.ClassRule;
import org.junit.Rule;
import org.junit.Test;
import org.junit.experimental.categories.Category;
import org.junit.rules.TestName;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos.SnapshotDescription;
import org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos.SnapshotRegionManifest;

/**
 * Test Export Snapshot Tool
 */
@Category({VerySlowMapReduceTests.class, LargeTests.class})
public class TestExportSnapshot {

  @ClassRule
  public static final HBaseClassTestRule CLASS_RULE =
      HBaseClassTestRule.forClass(TestExportSnapshot.class);

  private static final Logger LOG = LoggerFactory.getLogger(TestExportSnapshot.class);

  protected final static HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();

  protected final static byte[] FAMILY = Bytes.toBytes("cf");

  @Rule
  public final TestName testName = new TestName();

  protected TableName tableName;
  private byte[] emptySnapshotName;
  private byte[] snapshotName;
  private int tableNumFiles;
  private Admin admin;

  public static void setUpBaseConf(Configuration conf) {
    conf.setBoolean(SnapshotManager.HBASE_SNAPSHOT_ENABLED, true);
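    // Tune the mini cluster for tests: fast regionserver status reporting, short client
    // pauses, and extra map attempts so injected failures can still be retried.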
conf.setInt("hbase.regionserver.msginterval", 100); 090 conf.setInt("hbase.client.pause", 250); 091 conf.setInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER, 6); 092 conf.setBoolean("hbase.master.enabletable.roundrobin", true); 093 conf.setInt("mapreduce.map.maxattempts", 10); 094 // If a single node has enough failures (default 3), resource manager will blacklist it. 095 // With only 2 nodes and tests injecting faults, we don't want that. 096 conf.setInt("mapreduce.job.maxtaskfailures.per.tracker", 100); 097 } 098 099 @BeforeClass 100 public static void setUpBeforeClass() throws Exception { 101 setUpBaseConf(TEST_UTIL.getConfiguration()); 102 TEST_UTIL.startMiniCluster(1, 3); 103 TEST_UTIL.startMiniMapReduceCluster(); 104 } 105 106 @AfterClass 107 public static void tearDownAfterClass() throws Exception { 108 TEST_UTIL.shutdownMiniMapReduceCluster(); 109 TEST_UTIL.shutdownMiniCluster(); 110 } 111 112 /** 113 * Create a table and take a snapshot of the table used by the export test. 114 */ 115 @Before 116 public void setUp() throws Exception { 117 this.admin = TEST_UTIL.getAdmin(); 118 119 tableName = TableName.valueOf("testtb-" + testName.getMethodName()); 120 snapshotName = Bytes.toBytes("snaptb0-" + testName.getMethodName()); 121 emptySnapshotName = Bytes.toBytes("emptySnaptb0-" + testName.getMethodName()); 122 123 // create Table 124 createTable(); 125 126 // Take an empty snapshot 127 admin.snapshot(emptySnapshotName, tableName); 128 129 // Add some rows 130 SnapshotTestingUtils.loadData(TEST_UTIL, tableName, 50, FAMILY); 131 tableNumFiles = admin.getTableRegions(tableName).size(); 132 133 // take a snapshot 134 admin.snapshot(snapshotName, tableName); 135 } 136 137 protected void createTable() throws Exception { 138 SnapshotTestingUtils.createPreSplitTable(TEST_UTIL, tableName, 2, FAMILY); 139 } 140 141 protected interface RegionPredicate { 142 boolean evaluate(final RegionInfo regionInfo); 143 } 144 145 protected RegionPredicate getBypassRegionPredicate() { 146 return null; 147 } 148 149 @After 150 public void tearDown() throws Exception { 151 TEST_UTIL.deleteTable(tableName); 152 SnapshotTestingUtils.deleteAllSnapshots(TEST_UTIL.getAdmin()); 153 SnapshotTestingUtils.deleteArchiveDirectory(TEST_UTIL); 154 } 155 156 /** 157 * Verify if exported snapshot and copied files matches the original one. 
158 */ 159 @Test 160 public void testExportFileSystemState() throws Exception { 161 testExportFileSystemState(tableName, snapshotName, snapshotName, tableNumFiles); 162 } 163 164 @Test 165 public void testExportFileSystemStateWithSkipTmp() throws Exception { 166 TEST_UTIL.getConfiguration().setBoolean(ExportSnapshot.CONF_SKIP_TMP, true); 167 try { 168 testExportFileSystemState(tableName, snapshotName, snapshotName, tableNumFiles); 169 } finally { 170 TEST_UTIL.getConfiguration().setBoolean(ExportSnapshot.CONF_SKIP_TMP, false); 171 } 172 } 173 174 @Test 175 public void testEmptyExportFileSystemState() throws Exception { 176 testExportFileSystemState(tableName, emptySnapshotName, emptySnapshotName, 0); 177 } 178 179 @Test 180 public void testConsecutiveExports() throws Exception { 181 Path copyDir = getLocalDestinationDir(); 182 testExportFileSystemState(tableName, snapshotName, snapshotName, tableNumFiles, copyDir, false); 183 testExportFileSystemState(tableName, snapshotName, snapshotName, tableNumFiles, copyDir, true); 184 removeExportDir(copyDir); 185 } 186 187 @Test 188 public void testExportWithTargetName() throws Exception { 189 final byte[] targetName = Bytes.toBytes("testExportWithTargetName"); 190 testExportFileSystemState(tableName, snapshotName, targetName, tableNumFiles); 191 } 192 193 private void testExportFileSystemState(final TableName tableName, final byte[] snapshotName, 194 final byte[] targetName, int filesExpected) throws Exception { 195 testExportFileSystemState(tableName, snapshotName, targetName, 196 filesExpected, getHdfsDestinationDir(), false); 197 } 198 199 protected void testExportFileSystemState(final TableName tableName, 200 final byte[] snapshotName, final byte[] targetName, int filesExpected, 201 Path copyDir, boolean overwrite) throws Exception { 202 testExportFileSystemState(TEST_UTIL.getConfiguration(), tableName, snapshotName, targetName, 203 filesExpected, TEST_UTIL.getDefaultRootDirPath(), copyDir, 204 overwrite, getBypassRegionPredicate(), true); 205 } 206 207 /** 208 * Creates destination directory, runs ExportSnapshot() tool, and runs some verifications. 209 */ 210 protected static void testExportFileSystemState(final Configuration conf, final TableName tableName, 211 final byte[] snapshotName, final byte[] targetName, final int filesExpected, 212 final Path sourceDir, Path copyDir, final boolean overwrite, 213 final RegionPredicate bypassregionPredicate, boolean success) throws Exception { 214 URI hdfsUri = FileSystem.get(conf).getUri(); 215 FileSystem fs = FileSystem.get(copyDir.toUri(), new Configuration()); 216 copyDir = copyDir.makeQualified(fs); 217 218 List<String> opts = new ArrayList<>(); 219 opts.add("--snapshot"); 220 opts.add(Bytes.toString(snapshotName)); 221 opts.add("--copy-to"); 222 opts.add(copyDir.toString()); 223 if (targetName != snapshotName) { 224 opts.add("--target"); 225 opts.add(Bytes.toString(targetName)); 226 } 227 if (overwrite) opts.add("--overwrite"); 228 229 // Export Snapshot 230 int res = run(conf, new ExportSnapshot(), opts.toArray(new String[opts.size()])); 231 assertEquals(success ? 0 : 1, res); 232 if (!success) { 233 final Path targetDir = new Path(HConstants.SNAPSHOT_DIR_NAME, Bytes.toString(targetName)); 234 assertFalse(fs.exists(new Path(copyDir, targetDir))); 235 return; 236 } 237 238 // Verify File-System state 239 FileStatus[] rootFiles = fs.listStatus(copyDir); 240 assertEquals(filesExpected > 0 ? 
    for (FileStatus fileStatus: rootFiles) {
      String name = fileStatus.getPath().getName();
      assertTrue(fileStatus.isDirectory());
      assertTrue(name.equals(HConstants.SNAPSHOT_DIR_NAME) ||
                 name.equals(HConstants.HFILE_ARCHIVE_DIRECTORY));
    }

    // compare the snapshot metadata and verify the hfiles
    final FileSystem hdfs = FileSystem.get(hdfsUri, conf);
    final Path snapshotDir = new Path(HConstants.SNAPSHOT_DIR_NAME, Bytes.toString(snapshotName));
    final Path targetDir = new Path(HConstants.SNAPSHOT_DIR_NAME, Bytes.toString(targetName));
    verifySnapshotDir(hdfs, new Path(sourceDir, snapshotDir),
      fs, new Path(copyDir, targetDir));
    Set<String> snapshotFiles = verifySnapshot(conf, fs, copyDir, tableName,
      Bytes.toString(targetName), bypassregionPredicate);
    assertEquals(filesExpected, snapshotFiles.size());
  }

  /**
   * Check that ExportSnapshot will succeed if something fails but the retry succeeds.
   */
  @Test
  public void testExportRetry() throws Exception {
    Path copyDir = getLocalDestinationDir();
    FileSystem fs = FileSystem.get(copyDir.toUri(), new Configuration());
    copyDir = copyDir.makeQualified(fs);
    Configuration conf = new Configuration(TEST_UTIL.getConfiguration());
    conf.setBoolean(ExportSnapshot.Testing.CONF_TEST_FAILURE, true);
    conf.setInt(ExportSnapshot.Testing.CONF_TEST_FAILURE_COUNT, 2);
    conf.setInt("mapreduce.map.maxattempts", 3);
    testExportFileSystemState(conf, tableName, snapshotName, snapshotName, tableNumFiles,
      TEST_UTIL.getDefaultRootDirPath(), copyDir, true, getBypassRegionPredicate(), true);
  }

  /**
   * Check that ExportSnapshot will fail if we inject failures more times than MR will retry.
   */
  @Test
  public void testExportFailure() throws Exception {
    Path copyDir = getLocalDestinationDir();
    FileSystem fs = FileSystem.get(copyDir.toUri(), new Configuration());
    copyDir = copyDir.makeQualified(fs);
    Configuration conf = new Configuration(TEST_UTIL.getConfiguration());
    conf.setBoolean(ExportSnapshot.Testing.CONF_TEST_FAILURE, true);
    conf.setInt(ExportSnapshot.Testing.CONF_TEST_FAILURE_COUNT, 4);
    conf.setInt("mapreduce.map.maxattempts", 3);
    testExportFileSystemState(conf, tableName, snapshotName, snapshotName, tableNumFiles,
      TEST_UTIL.getDefaultRootDirPath(), copyDir, true, getBypassRegionPredicate(), false);
  }

  /*
   * Verify that the snapshot folder on file-system 1 matches the one on file-system 2.
   */
  protected static void verifySnapshotDir(final FileSystem fs1, final Path root1,
      final FileSystem fs2, final Path root2) throws IOException {
    assertEquals(listFiles(fs1, root1, root1), listFiles(fs2, root2, root2));
  }

  protected Set<String> verifySnapshot(final FileSystem fs, final Path rootDir,
      final TableName tableName, final String snapshotName) throws IOException {
    return verifySnapshot(TEST_UTIL.getConfiguration(), fs, rootDir, tableName,
      snapshotName, getBypassRegionPredicate());
  }

  /*
   * Verify that the files referenced by the snapshot exist.
   */
  protected static Set<String> verifySnapshot(final Configuration conf, final FileSystem fs,
      final Path rootDir, final TableName tableName, final String snapshotName,
      final RegionPredicate bypassregionPredicate) throws IOException {
    final Path exportedSnapshot = new Path(rootDir,
      new Path(HConstants.SNAPSHOT_DIR_NAME, snapshotName));
    final Set<String> snapshotFiles = new HashSet<>();
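    // Walk the snapshot references: every non-reference store file must exist, non-empty,
    // under the destination archive directory.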
    final Path exportedArchive = new Path(rootDir, HConstants.HFILE_ARCHIVE_DIRECTORY);
    SnapshotReferenceUtil.visitReferencedFiles(conf, fs, exportedSnapshot,
        new SnapshotReferenceUtil.SnapshotVisitor() {
          @Override
          public void storeFile(final RegionInfo regionInfo, final String family,
              final SnapshotRegionManifest.StoreFile storeFile) throws IOException {
            if (bypassregionPredicate != null && bypassregionPredicate.evaluate(regionInfo)) {
              return;
            }

            String hfile = storeFile.getName();
            snapshotFiles.add(hfile);
            if (storeFile.hasReference()) {
              // Nothing to do here, we already have the reference embedded
            } else {
              verifyNonEmptyFile(new Path(exportedArchive,
                new Path(FSUtils.getTableDir(new Path("./"), tableName),
                  new Path(regionInfo.getEncodedName(), new Path(family, hfile)))));
            }
          }

          private void verifyNonEmptyFile(final Path path) throws IOException {
            assertTrue(path + " should exist", fs.exists(path));
            assertTrue(path + " should not be empty", fs.getFileStatus(path).getLen() > 0);
          }
        });

    // Verify Snapshot description
    SnapshotDescription desc = SnapshotDescriptionUtils.readSnapshotInfo(fs, exportedSnapshot);
    assertTrue(desc.getName().equals(snapshotName));
    assertTrue(desc.getTable().equals(tableName.getNameAsString()));
    return snapshotFiles;
  }

  private static Set<String> listFiles(final FileSystem fs, final Path root, final Path dir)
      throws IOException {
    Set<String> files = new HashSet<>();
    int rootPrefix = root.makeQualified(fs).toString().length();
    FileStatus[] list = FSUtils.listStatus(fs, dir);
    if (list != null) {
      for (FileStatus fstat: list) {
        LOG.debug(Objects.toString(fstat.getPath()));
        if (fstat.isDirectory()) {
          files.addAll(listFiles(fs, root, fstat.getPath()));
        } else {
          files.add(fstat.getPath().makeQualified(fs).toString().substring(rootPrefix));
        }
      }
    }
    return files;
  }

  private Path getHdfsDestinationDir() {
    Path rootDir = TEST_UTIL.getHBaseCluster().getMaster().getMasterFileSystem().getRootDir();
    Path path = new Path(new Path(rootDir, "export-test"), "export-" + System.currentTimeMillis());
    LOG.info("HDFS export destination path: " + path);
    return path;
  }

  private Path getLocalDestinationDir() {
    Path path = TEST_UTIL.getDataTestDir("local-export-" + System.currentTimeMillis());
    LOG.info("Local export destination path: " + path);
    return path;
  }

  private static void removeExportDir(final Path path) throws IOException {
    FileSystem fs = FileSystem.get(path.toUri(), new Configuration());
    fs.delete(path, true);
  }
}