/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.backup;

import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertNotEquals;
import static org.junit.jupiter.api.Assertions.assertTrue;

import java.util.List;
import org.apache.hadoop.fs.LocatedFileStatus;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.backup.impl.BackupAdminImpl;
import org.apache.hadoop.hbase.backup.util.BackupUtils;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.regionserver.HRegion;
import org.apache.hadoop.hbase.testclassification.LargeTests;
import org.apache.hadoop.hbase.util.Bytes;
import org.junit.jupiter.api.Tag;
import org.junit.jupiter.api.Test;

import org.apache.hbase.thirdparty.com.google.common.collect.Lists;

@Tag(LargeTests.TAG)
public class TestIncrementalBackupRestoreWithOriginalSplits
  extends IncrementalBackupRestoreTestBase {

  @Test
  public void testIncBackupRestoreWithOriginalSplits() throws Exception {
    byte[] mobFam = Bytes.toBytes("mob");

    List<TableName> tables = Lists.newArrayList(table1);
    TableDescriptor newTable1Desc =
      TableDescriptorBuilder.newBuilder(table1Desc).setColumnFamily(ColumnFamilyDescriptorBuilder
        .newBuilder(mobFam).setMobEnabled(true).setMobThreshold(5L).build()).build();
    TEST_UTIL.getAdmin().modifyTable(newTable1Desc);

    try (Connection conn = TEST_UTIL.getConnection();
      BackupAdminImpl backupAdmin = new BackupAdminImpl(conn); Admin admin = conn.getAdmin()) {
      BackupRequest request = createBackupRequest(BackupType.FULL, tables, BACKUP_ROOT_DIR);
      String fullBackupId = backupAdmin.backupTables(request);
      assertTrue(checkSucceeded(fullBackupId));

      TableName[] fromTables = new TableName[] { table1 };
      TableName[] toTables = new TableName[] { table1_restore };

      List<LocatedFileStatus> preRestoreBackupFiles = getBackupFiles();
      backupAdmin.restore(BackupUtils.createRestoreRequest(BACKUP_ROOT_DIR, fullBackupId, false,
        fromTables, toTables, true, true));
      List<LocatedFileStatus> postRestoreBackupFiles = getBackupFiles();

      // Check that the backup files are the same before and after the restore process
      assertEquals(postRestoreBackupFiles, preRestoreBackupFiles);
      assertEquals(TEST_UTIL.countRows(table1_restore), NB_ROWS_IN_BATCH);

      int ROWS_TO_ADD = 1_000;
      // different IDs so that rows don't overlap
      insertIntoTable(conn, table1, famName, 3, ROWS_TO_ADD);
      insertIntoTable(conn, table1, mobFam, 4, ROWS_TO_ADD);
      List<HRegion> currentRegions = TEST_UTIL.getHBaseCluster().getRegions(table1);
      for (HRegion region : currentRegions) {
        byte[] name = region.getRegionInfo().getEncodedNameAsBytes();
        admin.splitRegionAsync(name).get();
      }

      TEST_UTIL.waitTableAvailable(table1);

      // Make sure we've split regions
      assertNotEquals(currentRegions, TEST_UTIL.getHBaseCluster().getRegions(table1));

      request = createBackupRequest(BackupType.INCREMENTAL, tables, BACKUP_ROOT_DIR);
      String incrementalBackupId = backupAdmin.backupTables(request);
      assertTrue(checkSucceeded(incrementalBackupId));
      preRestoreBackupFiles = getBackupFiles();
      backupAdmin.restore(BackupUtils.createRestoreRequest(BACKUP_ROOT_DIR, incrementalBackupId,
        false, fromTables, toTables, true, true));
      postRestoreBackupFiles = getBackupFiles();
      assertEquals(postRestoreBackupFiles, preRestoreBackupFiles);
      assertEquals(NB_ROWS_IN_BATCH + ROWS_TO_ADD + ROWS_TO_ADD,
        TEST_UTIL.countRows(table1_restore));

      // test bulkloads
      HRegion regionToBulkload = TEST_UTIL.getHBaseCluster().getRegions(table1).get(0);
      String regionName = regionToBulkload.getRegionInfo().getEncodedName();

      insertIntoTable(conn, table1, famName, 5, ROWS_TO_ADD);
      insertIntoTable(conn, table1, mobFam, 6, ROWS_TO_ADD);

      doBulkload(table1, regionName, famName, mobFam);

      // we need to major compact the regions to make sure there are no references
      // and the regions are once again splittable
      TEST_UTIL.compact(true);
      TEST_UTIL.flush();
      TEST_UTIL.waitTableAvailable(table1);

      for (HRegion region : TEST_UTIL.getHBaseCluster().getRegions(table1)) {
        if (region.isSplittable()) {
          admin.splitRegionAsync(region.getRegionInfo().getEncodedNameAsBytes()).get();
        }
      }

      request = createBackupRequest(BackupType.INCREMENTAL, tables, BACKUP_ROOT_DIR);
      incrementalBackupId = backupAdmin.backupTables(request);
      assertTrue(checkSucceeded(incrementalBackupId));

      preRestoreBackupFiles = getBackupFiles();
      backupAdmin.restore(BackupUtils.createRestoreRequest(BACKUP_ROOT_DIR, incrementalBackupId,
        false, fromTables, toTables, true, true));
      postRestoreBackupFiles = getBackupFiles();

      assertEquals(postRestoreBackupFiles, preRestoreBackupFiles);

      int rowsExpected = TEST_UTIL.countRows(table1);
      int rowsActual = TEST_UTIL.countRows(table1_restore);

      assertEquals(rowsExpected, rowsActual);
    }
  }
}