/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.snapshot;

import static org.junit.jupiter.api.Assertions.assertEquals;

import java.util.List;
import java.util.stream.Collectors;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.RegionInfo;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.tool.BulkLoadHFilesTool;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.HFileTestUtil;
import org.junit.jupiter.api.TestTemplate;

import org.apache.hbase.thirdparty.com.google.common.collect.Lists;

/**
 * Base class for snapshot-export tests against tables whose regions have been merged or split
 * before the snapshot is taken, so the exported snapshot must carry the resulting extra files
 * (e.g. reference files produced by a split) correctly.
 * <p>
 * Subclasses select normal vs. MOB mode through the constructor flag; {@code admin},
 * {@code TEST_UTIL}, {@code FAMILY}, {@code methodName}, {@code tableNumFiles} and the
 * {@code testExportFileSystemState(...)} helpers come from {@link ExportSnapshotTestBase}.
 */
public class ExportFileSystemStateWithMergeOrSplitRegionTestBase extends ExportSnapshotTestBase {

  /**
   * @param mob whether the test run targets a MOB-enabled setup (forwarded to the base class)
   */
  protected ExportFileSystemStateWithMergeOrSplitRegionTestBase(boolean mob) {
    super(mob);
  }

  /**
   * Exports a snapshot taken after two regions were merged. Compaction is disabled first so the
   * merged region keeps the original store files instead of rewriting them, which is the state
   * the export must handle.
   */
  @TestTemplate
  public void testExportFileSystemStateWithMergeRegion() throws Exception {
    // Disable compaction on every region server so the merge does not trigger a rewrite of the
    // store files before the snapshot is taken.
    admin.compactionSwitch(false,
      admin.getRegionServers().stream().map(a -> a.getServerName()).collect(Collectors.toList()));
    // Create a table pre-split into two regions at row key "2".
    String suffix = mob ? methodName + "-mob" : methodName;
    TableName tableName0 = TableName.valueOf("testtb-" + suffix + "-1");
    String snapshotName0 = "snaptb0-" + suffix + "-1";
    admin.createTable(
      TableDescriptorBuilder.newBuilder(tableName0)
        .setColumnFamilies(
          Lists.newArrayList(ColumnFamilyDescriptorBuilder.newBuilder(FAMILY).build()))
        .build(),
      new byte[][] { Bytes.toBytes("2") });
    // Put one row into each region so each region has a store file to flush.
    try (Table table = admin.getConnection().getTable(tableName0)) {
      table.put(new Put(Bytes.toBytes("1")).addColumn(FAMILY, null, Bytes.toBytes("1")));
      table.put(new Put(Bytes.toBytes("2")).addColumn(FAMILY, null, Bytes.toBytes("2")));
    }
    List<RegionInfo> regions = admin.getRegions(tableName0);
    assertEquals(2, regions.size());
    // With compaction off, the merged region still holds one file per original region.
    tableNumFiles = regions.size();
    // Merge the two regions (forcible merge) and wait for completion.
    admin.mergeRegionsAsync(new byte[][] { regions.get(0).getEncodedNameAsBytes(),
      regions.get(1).getEncodedNameAsBytes() }, true).get();
    // Snapshot the merged table, then export and verify the snapshot contents.
    admin.snapshot(snapshotName0, tableName0);
    testExportFileSystemState(tableName0, snapshotName0, snapshotName0, tableNumFiles);
  }

  /**
   * Exports a snapshot taken after a region split. The split produces two daughter regions whose
   * store files are references to the parent's single hfile, so the export must copy 3 files
   * (1 hfile + 2 reference files); the copier buffer and per-mapper group size are shrunk so the
   * three files are copied concurrently by different mappers.
   */
  @TestTemplate
  public void testExportFileSystemStateWithSplitRegion() throws Exception {
    // Disable compaction so the daughter regions keep reference files instead of rewriting them.
    admin.compactionSwitch(false,
      admin.getRegionServers().stream().map(a -> a.getServerName()).collect(Collectors.toList()));
    // Create a single-region table.
    String suffix = mob ? methodName + "-mob" : methodName;
    TableName splitTableName = TableName.valueOf(suffix);
    String splitTableSnap = "snapshot-" + suffix;
    admin.createTable(TableDescriptorBuilder.newBuilder(splitTableName).setColumnFamilies(
      Lists.newArrayList(ColumnFamilyDescriptorBuilder.newBuilder(FAMILY).build())).build());

    Path output = TEST_UTIL.getDataTestDir("output/cf");
    TEST_UTIL.getTestFileSystem().mkdirs(output);
    // Create and load a large hfile to ensure the execution time of MR job.
    HFileTestUtil.createHFile(TEST_UTIL.getConfiguration(), TEST_UTIL.getTestFileSystem(),
      new Path(output, "test_file"), FAMILY, Bytes.toBytes("q"), Bytes.toBytes("1"),
      Bytes.toBytes("9"), 9999999);
    BulkLoadHFilesTool tool = new BulkLoadHFilesTool(TEST_UTIL.getConfiguration());
    tool.run(new String[] { output.getParent().toString(), splitTableName.getNameAsString() });

    List<RegionInfo> regions = admin.getRegions(splitTableName);
    assertEquals(1, regions.size());
    tableNumFiles = regions.size();

    // Split the region at row key "5"; the two daughters hold references to the parent hfile.
    admin.split(splitTableName, Bytes.toBytes("5"));
    regions = admin.getRegions(splitTableName);
    assertEquals(2, regions.size());

    // Snapshot the split table.
    admin.snapshot(splitTableSnap, splitTableName);
    // Export snapshot and verify. Use a COPY of the shared configuration so the export tuning
    // below does not leak into TEST_UTIL's live configuration and affect later tests in this JVM.
    Configuration tmpConf = new Configuration(TEST_UTIL.getConfiguration());
    // Decrease the buffer size of copier to avoid the export task finished shortly
    tmpConf.setInt("snapshot.export.buffer.size", 1);
    // Decrease the maximum files of each mapper to ensure the three files(1 hfile + 2 reference
    // files)
    // copied in different mappers concurrently.
    tmpConf.setInt("snapshot.export.default.map.group", 1);
    testExportFileSystemState(tmpConf, splitTableName, splitTableSnap, splitTableSnap,
      tableNumFiles, TEST_UTIL.getDefaultRootDirPath(), getHdfsDestinationDir(), false, false,
      getBypassRegionPredicate(), true, false);
  }
}