/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.backup;

import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertNotEquals;
import static org.junit.jupiter.api.Assertions.assertThrows;
import static org.junit.jupiter.api.Assertions.assertTrue;

import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.LocatedFileStatus;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.RemoteIterator;
import org.apache.hadoop.hbase.HBaseTestingUtil;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.backup.impl.BackupAdminImpl;
import org.apache.hadoop.hbase.backup.util.BackupUtils;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.regionserver.HRegion;
import org.apache.hadoop.hbase.regionserver.LogRoller;
import org.apache.hadoop.hbase.snapshot.SnapshotTTLExpiredException;
import org.apache.hadoop.hbase.testclassification.LargeTests;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.EnvironmentEdge;
import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
import org.junit.jupiter.api.BeforeAll;
import org.junit.jupiter.api.Tag;
import org.junit.jupiter.api.Test;

import org.apache.hbase.thirdparty.com.google.common.collect.Lists;

/**
 * Verifies that backup operations honor the snapshot TTL: restoring from an expired full backup
 * must fail with {@link SnapshotTTLExpiredException}, and taking an incremental backup on top of
 * an expired full backup must fail as well.
 * <p>
 * The cluster is configured with a 30-second snapshot TTL; expiry is simulated by injecting an
 * {@link EnvironmentEdge} that reports a clock 30 seconds in the future.
 */
@Tag(LargeTests.TAG)
public class TestBackupRestoreExpiry extends TestBackupBase {

  @BeforeAll
  public static void setUp() throws Exception {
    TEST_UTIL = new HBaseTestingUtil();
    conf1 = TEST_UTIL.getConfiguration();
    // Snapshots (and therefore the full backups built on them) expire after 30 seconds.
    conf1.setLong(HConstants.DEFAULT_SNAPSHOT_TTL_CONFIG_KEY, 30);
    autoRestoreOnFailure = true;
    useSecondCluster = false;
    setUpHelper();
  }

  /**
   * Resets table contents, rolls the WALs, and reloads the base data set so the scenarios run by
   * {@link #testSequentially()} do not interfere with each other.
   */
  public void ensurePreviousBackupTestsAreCleanedUp() throws Exception {
    TEST_UTIL.flush(table1);
    TEST_UTIL.flush(table2);

    TEST_UTIL.truncateTable(table1).close();
    TEST_UTIL.truncateTable(table2).close();

    if (TEST_UTIL.getAdmin().tableExists(table1_restore)) {
      TEST_UTIL.flush(table1_restore);
      TEST_UTIL.truncateTable(table1_restore).close();
    }

    // Roll every region server's WAL so old edits cannot bleed into the next scenario's
    // incremental backup. Roll failures are tolerable here: this is best-effort cleanup.
    TEST_UTIL.getMiniHBaseCluster().getRegionServerThreads().forEach(rst -> {
      try {
        LogRoller walRoller = rst.getRegionServer().getWalRoller();
        walRoller.requestRollAll();
        walRoller.waitUntilWalRollFinished();
      } catch (Exception ignored) {
        // best-effort: a failed roll only risks extra WAL data, not test correctness
      }
    });

    try (Table table = TEST_UTIL.getConnection().getTable(table1)) {
      loadTable(table);
    }

    try (Table table = TEST_UTIL.getConnection().getTable(table2)) {
      loadTable(table);
    }
  }

  /**
   * Runs both expiry scenarios in a fixed order with cleanup in between, because they share the
   * same mini-cluster state and must not overlap.
   */
  @Test
  public void testSequentially() throws Exception {
    // NOTE: catch-and-rethrow removed — try/finally alone preserves the original behavior.
    try {
      testRestoreOnExpiredFullBackup();
    } finally {
      ensurePreviousBackupTestsAreCleanedUp();
    }

    try {
      testIncrementalBackupOnExpiredFullBackup();
    } finally {
      ensurePreviousBackupTestsAreCleanedUp();
    }
  }

  /**
   * Takes a full backup, fast-forwards the clock past the snapshot TTL, and asserts that a
   * restore from the now-expired backup fails with {@link SnapshotTTLExpiredException}.
   */
  public void testRestoreOnExpiredFullBackup() throws Exception {
    byte[] mobFam = Bytes.toBytes("mob");

    List<TableName> tables = Lists.newArrayList(table1);
    // Add a MOB-enabled family so the backup covers MOB data as well.
    TableDescriptor newTable1Desc =
      TableDescriptorBuilder.newBuilder(table1Desc).setColumnFamily(ColumnFamilyDescriptorBuilder
        .newBuilder(mobFam).setMobEnabled(true).setMobThreshold(5L).build()).build();
    TEST_UTIL.getAdmin().modifyTable(newTable1Desc);

    Connection conn = TEST_UTIL.getConnection();
    // try-with-resources: the admin is closed even when an assertion below fails.
    try (BackupAdminImpl backupAdmin = new BackupAdminImpl(conn)) {
      BackupRequest request = createBackupRequest(BackupType.FULL, tables, BACKUP_ROOT_DIR);
      String fullBackupId = backupAdmin.backupTables(request);
      assertTrue(checkSucceeded(fullBackupId));

      TableName[] fromTables = new TableName[] { table1 };
      TableName[] toTables = new TableName[] { table1_restore };

      // Pretend the clock is 30s ahead, i.e. exactly past the snapshot TTL.
      EnvironmentEdgeManager.injectEdge(new EnvironmentEdge() {
        @Override
        public long currentTime() {
          return System.currentTimeMillis() + (30 * 1000);
        }
      });

      try {
        assertThrows(SnapshotTTLExpiredException.class, () -> {
          backupAdmin.restore(BackupUtils.createRestoreRequest(BACKUP_ROOT_DIR, fullBackupId,
            false, fromTables, toTables, true, true));
        });
      } finally {
        // Always restore the real clock, even if the assertion fails; otherwise the injected
        // edge would poison every later test in this JVM.
        EnvironmentEdgeManager.reset();
      }
    }
  }

  /**
   * Takes a full backup, restores it (verifying the backup files are untouched), mutates and
   * splits the source table, fast-forwards the clock past the snapshot TTL, and asserts that an
   * incremental backup then fails with a {@link SnapshotTTLExpiredException} cause.
   */
  public void testIncrementalBackupOnExpiredFullBackup() throws Exception {
    byte[] mobFam = Bytes.toBytes("mob");

    List<TableName> tables = Lists.newArrayList(table1);
    TableDescriptor newTable1Desc =
      TableDescriptorBuilder.newBuilder(table1Desc).setColumnFamily(ColumnFamilyDescriptorBuilder
        .newBuilder(mobFam).setMobEnabled(true).setMobThreshold(5L).build()).build();
    TEST_UTIL.getAdmin().modifyTable(newTable1Desc);

    Connection conn = TEST_UTIL.getConnection();
    try (BackupAdminImpl backupAdmin = new BackupAdminImpl(conn)) {
      BackupRequest request = createBackupRequest(BackupType.FULL, tables, BACKUP_ROOT_DIR);
      String fullBackupId = backupAdmin.backupTables(request);
      assertTrue(checkSucceeded(fullBackupId));

      TableName[] fromTables = new TableName[] { table1 };
      TableName[] toTables = new TableName[] { table1_restore };

      List<LocatedFileStatus> preRestoreBackupFiles = getBackupFiles();
      backupAdmin.restore(BackupUtils.createRestoreRequest(BACKUP_ROOT_DIR, fullBackupId, false,
        fromTables, toTables, true, true));
      List<LocatedFileStatus> postRestoreBackupFiles = getBackupFiles();

      // Restoring must not modify the backup files themselves (expected first, actual second).
      assertEquals(preRestoreBackupFiles, postRestoreBackupFiles);
      assertEquals(NB_ROWS_IN_BATCH, TEST_UTIL.countRows(table1_restore));

      int rowsToAdd = 1_000;
      // Different batch IDs so the new rows don't overlap the initial load.
      insertIntoTable(conn, table1, famName, 3, rowsToAdd);
      insertIntoTable(conn, table1, mobFam, 4, rowsToAdd);

      // Split every region so the incremental backup has to cope with a changed region layout.
      // try-with-resources: the original leaked this Admin.
      try (Admin admin = conn.getAdmin()) {
        List<HRegion> currentRegions = TEST_UTIL.getHBaseCluster().getRegions(table1);
        for (HRegion region : currentRegions) {
          byte[] name = region.getRegionInfo().getEncodedNameAsBytes();
          admin.splitRegionAsync(name).get();
        }

        TEST_UTIL.waitTableAvailable(table1);

        // Make sure the splits actually happened.
        assertNotEquals(currentRegions, TEST_UTIL.getHBaseCluster().getRegions(table1));
      }

      // Pretend the clock is 30s ahead, i.e. exactly past the snapshot TTL.
      EnvironmentEdgeManager.injectEdge(new EnvironmentEdge() {
        @Override
        public long currentTime() {
          return System.currentTimeMillis() + (30 * 1000);
        }
      });

      try {
        IOException e = assertThrows(IOException.class, () -> {
          backupAdmin
            .backupTables(createBackupRequest(BackupType.INCREMENTAL, tables, BACKUP_ROOT_DIR));
        });
        assertTrue(e.getCause() instanceof SnapshotTTLExpiredException);
      } finally {
        // Always restore the real clock, even if the assertions fail.
        EnvironmentEdgeManager.reset();
      }
    }
  }

  /**
   * Recursively lists every file currently under the backup root directory.
   * @return all backup files, in the order produced by the file system listing
   * @throws IOException if the listing fails
   */
  private List<LocatedFileStatus> getBackupFiles() throws IOException {
    FileSystem fs = TEST_UTIL.getTestFileSystem();
    RemoteIterator<LocatedFileStatus> iter = fs.listFiles(new Path(BACKUP_ROOT_DIR), true);
    List<LocatedFileStatus> files = new ArrayList<>();

    while (iter.hasNext()) {
      files.add(iter.next());
    }

    return files;
  }
}