/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.regionserver;

import java.io.IOException;
import org.apache.commons.lang3.StringUtils;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseClassTestRule;
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.RegionInfo;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.testclassification.MediumTests;
import org.apache.hadoop.hbase.testclassification.RegionServerTests;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.FSUtils;
import org.apache.hadoop.hdfs.DFSClient;
import org.junit.After;
import org.junit.Assert;
import org.junit.Before;
import org.junit.ClassRule;
import org.junit.Test;
import org.junit.experimental.categories.Category;

/**
 * Verifies that an HRegion can be opened read-only on top of an HDFS-level snapshot of the
 * HBase root directory, both for a clean region and for a region whose directory contains
 * leftover {@code .tmp}, splits, and merges entries.
 */
@Category({RegionServerTests.class, MediumTests.class})
public class TestHdfsSnapshotHRegion {

  @ClassRule
  public static final HBaseClassTestRule CLASS_RULE =
      HBaseClassTestRule.forClass(TestHdfsSnapshotHRegion.class);

  private static final HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
  private static final String SNAPSHOT_NAME = "foo_snapshot";
  private Table table;
  public static final TableName TABLE_NAME = TableName.valueOf("foo");
  public static final byte[] FAMILY = Bytes.toBytes("f1");
  private DFSClient client;
  // HBase root directory path with the filesystem scheme/authority stripped, as expected by
  // the DFSClient snapshot APIs.
  private String baseDir;

  @Before
  public void setUp() throws Exception {
    Configuration c = TEST_UTIL.getConfiguration();
    c.setBoolean("dfs.support.append", true);
    TEST_UTIL.startMiniCluster(1);
    table = TEST_UTIL.createMultiRegionTable(TABLE_NAME, FAMILY);
    TEST_UTIL.loadTable(table, FAMILY);

    // Set up HDFS snapshotting: strip the URI prefix from the root dir so DFSClient gets a
    // plain path, then mark that directory snapshottable.
    client = new DFSClient(TEST_UTIL.getDFSCluster().getURI(), TEST_UTIL.getConfiguration());
    String fullUriPath = TEST_UTIL.getDefaultRootDirPath().toString();
    String uriString = TEST_UTIL.getTestFileSystem().getUri().toString();
    baseDir = StringUtils.removeStart(fullUriPath, uriString);
    client.allowSnapshot(baseDir);
  }

  @After
  public void tearDown() throws Exception {
    // Always shut the mini cluster down, even if snapshot cleanup fails; otherwise a failed
    // deleteSnapshot would leak the cluster into subsequent tests.
    try {
      client.deleteSnapshot(baseDir, SNAPSHOT_NAME);
    } finally {
      TEST_UTIL.shutdownMiniCluster();
    }
  }

  @Test
  public void testOpeningReadOnlyRegionBasic() throws Exception {
    String snapshotDir = client.createSnapshot(baseDir, SNAPSHOT_NAME);
    RegionInfo firstRegion = TEST_UTIL.getConnection().getRegionLocator(
        table.getName()).getAllRegionLocations().stream().findFirst().get().getRegion();
    Path tableDir = FSUtils.getTableDir(new Path(snapshotDir), TABLE_NAME);
    HRegion snapshottedRegion = openSnapshotRegion(firstRegion, tableDir);
    Assert.assertNotNull(snapshottedRegion);
    snapshottedRegion.close();
  }

  @Test
  public void testSnapshottingWithTmpSplitsAndMergeDirectoriesPresent() throws Exception {
    // Create the transient region subdirectories and make sure the read-only open ignores them.
    RegionInfo firstRegion = TEST_UTIL.getConnection().getRegionLocator(
        table.getName()).getAllRegionLocations().stream().findFirst().get().getRegion();
    String encodedName = firstRegion.getEncodedName();
    Path tableDir = FSUtils.getTableDir(TEST_UTIL.getDefaultRootDirPath(), TABLE_NAME);
    Path regionDirectoryPath = new Path(tableDir, encodedName);
    // Close each stream returned by create() so we don't leak open output streams.
    TEST_UTIL.getTestFileSystem().create(
        new Path(regionDirectoryPath, HRegionFileSystem.REGION_TEMP_DIR)).close();
    TEST_UTIL.getTestFileSystem().create(
        new Path(regionDirectoryPath, HRegionFileSystem.REGION_SPLITS_DIR)).close();
    TEST_UTIL.getTestFileSystem().create(
        new Path(regionDirectoryPath, HRegionFileSystem.REGION_MERGES_DIR)).close();
    // Now snapshot; use the shared constant so tearDown deletes the right snapshot.
    String snapshotDir = client.createSnapshot(baseDir, SNAPSHOT_NAME);
    // Everything should still open just fine.
    HRegion snapshottedRegion = openSnapshotRegion(firstRegion,
        FSUtils.getTableDir(new Path(snapshotDir), TABLE_NAME));
    Assert.assertNotNull(snapshottedRegion); // no errors and the region should open
    snapshottedRegion.close();
  }

  /**
   * Opens the given region read-only from the filesystem under {@code tableDir}.
   * @param firstRegion region to open
   * @param tableDir table directory (inside an HDFS snapshot) containing the region
   * @return the opened read-only region
   * @throws IOException if the region cannot be opened
   */
  private HRegion openSnapshotRegion(RegionInfo firstRegion, Path tableDir) throws IOException {
    return HRegion.openReadOnlyFileSystemHRegion(
        TEST_UTIL.getConfiguration(),
        TEST_UTIL.getTestFileSystem(),
        tableDir,
        firstRegion,
        table.getDescriptor()
    );
  }
}