/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.client;

import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;

import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseClassTestRule;
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
import org.apache.hadoop.hbase.master.snapshot.SnapshotManager;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.regionserver.ConstantSizeRegionSplitPolicy;
import org.apache.hadoop.hbase.snapshot.SnapshotTestingUtils;
import org.apache.hadoop.hbase.testclassification.ClientTests;
import org.apache.hadoop.hbase.testclassification.MediumTests;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
import org.junit.After;
import org.junit.AfterClass;
import org.junit.Before;
import org.junit.BeforeClass;
import org.junit.ClassRule;
import org.junit.Test;
import org.junit.experimental.categories.Category;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

/**
 * Test class to verify that metadata is consistent before and after a snapshot attempt.
 */
@Category({ MediumTests.class, ClientTests.class })
public class TestSnapshotMetadata {

  @ClassRule
  public static final HBaseClassTestRule CLASS_RULE =
    HBaseClassTestRule.forClass(TestSnapshotMetadata.class);

  private static final Logger LOG = LoggerFactory.getLogger(TestSnapshotMetadata.class);

  private static final HBaseTestingUtility UTIL = new HBaseTestingUtility();
  private static final int NUM_RS = 2;
  private static final String STRING_TABLE_NAME = "TestSnapshotMetadata";

  private static final String MAX_VERSIONS_FAM_STR = "fam_max_columns";
  private static final byte[] MAX_VERSIONS_FAM = Bytes.toBytes(MAX_VERSIONS_FAM_STR);

  private static final String COMPRESSED_FAM_STR = "fam_compressed";
  private static final byte[] COMPRESSED_FAM = Bytes.toBytes(COMPRESSED_FAM_STR);

  private static final String BLOCKSIZE_FAM_STR = "fam_blocksize";
  private static final byte[] BLOCKSIZE_FAM = Bytes.toBytes(BLOCKSIZE_FAM_STR);

  private static final String BLOOMFILTER_FAM_STR = "fam_bloomfilter";
  private static final byte[] BLOOMFILTER_FAM = Bytes.toBytes(BLOOMFILTER_FAM_STR);

  private static final String TEST_CONF_CUSTOM_VALUE = "TestCustomConf";
  private static final String TEST_CUSTOM_VALUE = "TestCustomValue";

  private static final byte[][] families =
    { MAX_VERSIONS_FAM, BLOOMFILTER_FAM, COMPRESSED_FAM, BLOCKSIZE_FAM };

  private static final DataBlockEncoding DATA_BLOCK_ENCODING_TYPE = DataBlockEncoding.FAST_DIFF;
  private static final BloomType BLOOM_TYPE = BloomType.ROW;
  private static final int BLOCK_SIZE = 98;
  private static final int MAX_VERSIONS = 8;

  private Admin admin;
  private String originalTableDescription;
  private HTableDescriptor originalTableDescriptor;
  TableName originalTableName;

  private static FileSystem fs;
  private static Path rootDir;

  @BeforeClass
  public static void setupCluster() throws Exception {
    setupConf(UTIL.getConfiguration());
    UTIL.startMiniCluster(NUM_RS);

    fs = UTIL.getHBaseCluster().getMaster().getMasterFileSystem().getFileSystem();
    rootDir = UTIL.getHBaseCluster().getMaster().getMasterFileSystem().getRootDir();
  }

  @AfterClass
  public static void cleanupTest() throws Exception {
    try {
      UTIL.shutdownMiniCluster();
    } catch (Exception e) {
      LOG.warn("failure shutting down cluster", e);
    }
  }

  private static void setupConf(Configuration conf) {
    // enable snapshot support
    conf.setBoolean(SnapshotManager.HBASE_SNAPSHOT_ENABLED, true);
    // disable the ui
    conf.setInt("hbase.regionserver.info.port", -1);
    // change the flush size to a small amount, regulating the number of store files
    conf.setInt("hbase.hregion.memstore.flush.size", 25000);
    // make sure we get a compaction when doing a load, but keep some files around in the store
    conf.setInt("hbase.hstore.compaction.min", 10);
    conf.setInt("hbase.hstore.compactionThreshold", 10);
    // block writes if we get to 12 store files
    conf.setInt("hbase.hstore.blockingStoreFiles", 12);
    conf.setInt("hbase.regionserver.msginterval", 100);
    conf.setBoolean("hbase.master.enabletable.roundrobin", true);
    // avoid potentially aggressive splitting, which would cause the snapshot to fail
    conf.set(HConstants.HBASE_REGION_SPLIT_POLICY_KEY,
      ConstantSizeRegionSplitPolicy.class.getName());
  }

  @Before
  public void setup() throws Exception {
    admin = UTIL.getAdmin();
    createTableWithNonDefaultProperties();
  }

  @After
  public void tearDown() throws Exception {
    SnapshotTestingUtils.deleteAllSnapshots(admin);
  }

  /*
   * Create a table that has non-default properties, so we can verify that they are preserved.
   */
  private void createTableWithNonDefaultProperties() throws Exception {
    final long startTime = EnvironmentEdgeManager.currentTime();
    final String sourceTableNameAsString = STRING_TABLE_NAME + startTime;
    originalTableName = TableName.valueOf(sourceTableNameAsString);

    // give each column family a non-default property
    HColumnDescriptor maxVersionsColumn = new HColumnDescriptor(MAX_VERSIONS_FAM);
    HColumnDescriptor bloomFilterColumn = new HColumnDescriptor(BLOOMFILTER_FAM);
    HColumnDescriptor dataBlockColumn = new HColumnDescriptor(COMPRESSED_FAM);
    HColumnDescriptor blockSizeColumn = new HColumnDescriptor(BLOCKSIZE_FAM);

    maxVersionsColumn.setMaxVersions(MAX_VERSIONS);
    bloomFilterColumn.setBloomFilterType(BLOOM_TYPE);
    dataBlockColumn.setDataBlockEncoding(DATA_BLOCK_ENCODING_TYPE);
    blockSizeColumn.setBlocksize(BLOCK_SIZE);

    HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(sourceTableNameAsString));
    htd.addFamily(maxVersionsColumn);
    htd.addFamily(bloomFilterColumn);
    htd.addFamily(dataBlockColumn);
    htd.addFamily(blockSizeColumn);
    htd.setValue(TEST_CUSTOM_VALUE, TEST_CUSTOM_VALUE);
    htd.setConfiguration(TEST_CONF_CUSTOM_VALUE, TEST_CONF_CUSTOM_VALUE);
    assertTrue(htd.getConfiguration().size() > 0);

    admin.createTable(htd);
    Table original = UTIL.getConnection().getTable(originalTableName);
    originalTableName = TableName.valueOf(sourceTableNameAsString);
    originalTableDescriptor = admin.getTableDescriptor(originalTableName);
    originalTableDescription = originalTableDescriptor.toStringCustomizedValues();

    original.close();
  }

  /**
   * Verify that the describe for a cloned table matches the describe from the original.
   */
  @Test
  public void testDescribeMatchesAfterClone() throws Exception {
    // Clone the original table
    final String clonedTableNameAsString = "clone" + originalTableName;
    final TableName clonedTableName = TableName.valueOf(clonedTableNameAsString);
    final String snapshotNameAsString =
      "snapshot" + originalTableName + EnvironmentEdgeManager.currentTime();
    final byte[] snapshotName = Bytes.toBytes(snapshotNameAsString);

    // restore the snapshot into a cloned table and examine the output
    List<byte[]> familiesList = new ArrayList<>();
    Collections.addAll(familiesList, families);

    // Create a snapshot in which all families are empty
    SnapshotTestingUtils.createSnapshotAndValidate(admin, originalTableName, null, familiesList,
      snapshotNameAsString, rootDir, fs, /* onlineSnapshot= */ false);

    admin.cloneSnapshot(snapshotName, clonedTableName);
    Table clonedTable = UTIL.getConnection().getTable(clonedTableName);
    HTableDescriptor cloneHtd = admin.getTableDescriptor(clonedTableName);
    assertEquals(originalTableDescription.replace(originalTableName.getNameAsString(),
      clonedTableNameAsString), cloneHtd.toStringCustomizedValues());

    // Verify the custom fields
    assertEquals(originalTableDescriptor.getValues().size(), cloneHtd.getValues().size());
    assertEquals(originalTableDescriptor.getConfiguration().size(),
      cloneHtd.getConfiguration().size());
    assertEquals(TEST_CUSTOM_VALUE, cloneHtd.getValue(TEST_CUSTOM_VALUE));
    assertEquals(TEST_CONF_CUSTOM_VALUE, cloneHtd.getConfigurationValue(TEST_CONF_CUSTOM_VALUE));
    assertEquals(originalTableDescriptor.getValues(), cloneHtd.getValues());
    assertEquals(originalTableDescriptor.getConfiguration(), cloneHtd.getConfiguration());

    admin.enableTable(originalTableName);
    clonedTable.close();
  }

  /**
   * Verify that the describe for a restored table matches the describe of the original.
   */
  @Test
  public void testDescribeMatchesAfterRestore() throws Exception {
    runRestoreWithAdditionalMetadata(false);
  }

  /**
   * Verify that if metadata changed after a snapshot was taken, the old metadata replaces the new
   * metadata during a restore.
   */
  @Test
  public void testDescribeMatchesAfterMetadataChangeAndRestore() throws Exception {
    runRestoreWithAdditionalMetadata(true);
  }

  /**
   * Verify that when the table is empty, metadata changes made after the snapshot do not affect
   * the restored table's original metadata.
   */
  @Test
  public void testDescribeOnEmptyTableMatchesAfterMetadataChangeAndRestore() throws Exception {
    runRestoreWithAdditionalMetadata(true, false);
  }

  private void runRestoreWithAdditionalMetadata(boolean changeMetadata) throws Exception {
    runRestoreWithAdditionalMetadata(changeMetadata, true);
  }

  private void runRestoreWithAdditionalMetadata(boolean changeMetadata, boolean addData)
    throws Exception {

    if (admin.isTableDisabled(originalTableName)) {
      admin.enableTable(originalTableName);
    }

    // populate it with data
    final byte[] familyForUpdate = BLOCKSIZE_FAM;

    List<byte[]> familiesWithDataList = new ArrayList<>();
    List<byte[]> emptyFamiliesList = new ArrayList<>();
    if (addData) {
      Table original = UTIL.getConnection().getTable(originalTableName);
      UTIL.loadTable(original, familyForUpdate); // family arbitrarily chosen
      original.close();

      for (byte[] family : families) {
        if (family != familyForUpdate) {
          emptyFamiliesList.add(family);
        }
      }
      familiesWithDataList.add(familyForUpdate);
    } else {
      Collections.addAll(emptyFamiliesList, families);
    }

    // take an offline ("disabled" table) snapshot
    final String snapshotNameAsString =
      "snapshot" + originalTableName + EnvironmentEdgeManager.currentTime();
    final byte[] snapshotName = Bytes.toBytes(snapshotNameAsString);

    SnapshotTestingUtils.createSnapshotAndValidate(admin, originalTableName, familiesWithDataList,
      emptyFamiliesList, snapshotNameAsString, rootDir, fs, /* onlineSnapshot= */ false);

    admin.enableTable(originalTableName);

    if (changeMetadata) {
      final String newFamilyNameAsString = "newFamily" + EnvironmentEdgeManager.currentTime();
      final byte[] newFamilyName = Bytes.toBytes(newFamilyNameAsString);

      admin.disableTable(originalTableName);
      HColumnDescriptor hcd = new HColumnDescriptor(newFamilyName);
      admin.addColumnFamily(originalTableName, hcd);
      assertTrue("New column family was not added.",
        admin.getTableDescriptor(originalTableName).toString().contains(newFamilyNameAsString));
    }

    // restore it
    if (!admin.isTableDisabled(originalTableName)) {
      admin.disableTable(originalTableName);
    }

    admin.restoreSnapshot(snapshotName);
    admin.enableTable(originalTableName);

    // verify that the description is reverted
    Table original = UTIL.getConnection().getTable(originalTableName);
    try {
      assertEquals(originalTableDescriptor, admin.getTableDescriptor(originalTableName));
      assertEquals(originalTableDescriptor, original.getTableDescriptor());
    } finally {
      original.close();
    }
  }
}