/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.backup;

import static org.junit.Assert.assertNotEquals;
import static org.junit.Assert.assertThrows;
import static org.junit.Assert.assertTrue;

import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.LocatedFileStatus;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.RemoteIterator;
import org.apache.hadoop.hbase.HBaseClassTestRule;
import org.apache.hadoop.hbase.HBaseTestingUtil;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.backup.impl.BackupAdminImpl;
import org.apache.hadoop.hbase.backup.util.BackupUtils;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.regionserver.HRegion;
import org.apache.hadoop.hbase.regionserver.LogRoller;
import org.apache.hadoop.hbase.snapshot.SnapshotTTLExpiredException;
import org.apache.hadoop.hbase.testclassification.LargeTests;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.EnvironmentEdge;
import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
import org.junit.Assert;
import org.junit.BeforeClass;
import org.junit.ClassRule;
import org.junit.Test;
import org.junit.experimental.categories.Category;

import org.apache.hbase.thirdparty.com.google.common.collect.Lists;

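/**
 * Tests that restore and incremental backup operations fail cleanly when the snapshot backing a
 * full backup has outlived its configured TTL.
 */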
@Category(LargeTests.class)
public class TestBackupRestoreExpiry extends TestBackupBase {

  @ClassRule
  public static final HBaseClassTestRule CLASS_RULE =
    HBaseClassTestRule.forClass(TestBackupRestoreExpiry.class);

  @BeforeClass
  public static void setUp() throws Exception {
    TEST_UTIL = new HBaseTestingUtil();
    conf1 = TEST_UTIL.getConfiguration();
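    // Use a short default snapshot TTL (30 seconds) so the tests can expire backup snapshots
    // just by advancing the injected clock.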
    conf1.setLong(HConstants.DEFAULT_SNAPSHOT_TTL_CONFIG_KEY, 30);
    autoRestoreOnFailure = true;
    useSecondCluster = false;
    setUpHelper();
  }

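  /**
   * Resets state shared with other backup scenarios: flushes and truncates the test tables, rolls
   * all WALs so edits from a previous scenario do not linger, and reloads the base data set.
   */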
  public void ensurePreviousBackupTestsAreCleanedUp() throws Exception {
    TEST_UTIL.flush(table1);
    TEST_UTIL.flush(table2);

    TEST_UTIL.truncateTable(table1).close();
    TEST_UTIL.truncateTable(table2).close();

    if (TEST_UTIL.getAdmin().tableExists(table1_restore)) {
      TEST_UTIL.flush(table1_restore);
      TEST_UTIL.truncateTable(table1_restore).close();
    }

    TEST_UTIL.getMiniHBaseCluster().getRegionServerThreads().forEach(rst -> {
      try {
        LogRoller walRoller = rst.getRegionServer().getWalRoller();
        walRoller.requestRollAll();
        walRoller.waitUntilWalRollFinished();
      } catch (Exception ignored) {
      }
    });

    try (Table table = TEST_UTIL.getConnection().getTable(table1)) {
      loadTable(table);
    }

    try (Table table = TEST_UTIL.getConnection().getTable(table2)) {
      loadTable(table);
    }
  }

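  // Both scenarios share the same mini cluster, so they run from a single JUnit entry point with
  // a cleanup pass in between rather than as independent @Test methods.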
  @Test
  public void testSequentially() throws Exception {
    try {
      testRestoreOnExpiredFullBackup();
    } finally {
      ensurePreviousBackupTestsAreCleanedUp();
    }

    try {
      testIncrementalBackupOnExpiredFullBackup();
    } finally {
      ensurePreviousBackupTestsAreCleanedUp();
    }
  }

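  /**
   * Takes a full backup and verifies that restoring it fails with
   * {@link SnapshotTTLExpiredException} once the backup's snapshot TTL has elapsed.
   */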
  public void testRestoreOnExpiredFullBackup() throws Exception {
    byte[] mobFam = Bytes.toBytes("mob");

    List<TableName> tables = Lists.newArrayList(table1);
    TableDescriptor newTable1Desc =
      TableDescriptorBuilder.newBuilder(table1Desc).setColumnFamily(ColumnFamilyDescriptorBuilder
        .newBuilder(mobFam).setMobEnabled(true).setMobThreshold(5L).build()).build();
    TEST_UTIL.getAdmin().modifyTable(newTable1Desc);

    Connection conn = TEST_UTIL.getConnection();
    BackupAdminImpl backupAdmin = new BackupAdminImpl(conn);
    BackupRequest request = createBackupRequest(BackupType.FULL, tables, BACKUP_ROOT_DIR);
    String fullBackupId = backupAdmin.backupTables(request);
    assertTrue(checkSucceeded(fullBackupId));

    TableName[] fromTables = new TableName[] { table1 };
    TableName[] toTables = new TableName[] { table1_restore };

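    // Fast-forward the clock past the 30 second snapshot TTL configured in setUp, so the full
    // backup's snapshot is treated as expired.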
    EnvironmentEdgeManager.injectEdge(new EnvironmentEdge() {
      // time + 30s
      @Override
      public long currentTime() {
        return System.currentTimeMillis() + (30 * 1000);
      }
    });

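    // Restoring the now-expired full backup should fail with SnapshotTTLExpiredException.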
    assertThrows(SnapshotTTLExpiredException.class, () -> {
      backupAdmin.restore(BackupUtils.createRestoreRequest(BACKUP_ROOT_DIR, fullBackupId, false,
        fromTables, toTables, true, true));
    });

    EnvironmentEdgeManager.reset();
    backupAdmin.close();
  }

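  /**
   * Takes a full backup, restores it while still fresh, and then verifies that a subsequent
   * incremental backup fails once the full backup's snapshot TTL has elapsed.
   */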
  public void testIncrementalBackupOnExpiredFullBackup() throws Exception {
    byte[] mobFam = Bytes.toBytes("mob");

    List<TableName> tables = Lists.newArrayList(table1);
    TableDescriptor newTable1Desc =
      TableDescriptorBuilder.newBuilder(table1Desc).setColumnFamily(ColumnFamilyDescriptorBuilder
        .newBuilder(mobFam).setMobEnabled(true).setMobThreshold(5L).build()).build();
    TEST_UTIL.getAdmin().modifyTable(newTable1Desc);

    Connection conn = TEST_UTIL.getConnection();
    BackupAdminImpl backupAdmin = new BackupAdminImpl(conn);
    BackupRequest request = createBackupRequest(BackupType.FULL, tables, BACKUP_ROOT_DIR);
    String fullBackupId = backupAdmin.backupTables(request);
    assertTrue(checkSucceeded(fullBackupId));

    TableName[] fromTables = new TableName[] { table1 };
    TableName[] toTables = new TableName[] { table1_restore };

    List<LocatedFileStatus> preRestoreBackupFiles = getBackupFiles();
    backupAdmin.restore(BackupUtils.createRestoreRequest(BACKUP_ROOT_DIR, fullBackupId, false,
      fromTables, toTables, true, true));
    List<LocatedFileStatus> postRestoreBackupFiles = getBackupFiles();

    // Check that the backup files are the same before and after the restore process
    Assert.assertEquals(preRestoreBackupFiles, postRestoreBackupFiles);
    Assert.assertEquals(NB_ROWS_IN_BATCH, TEST_UTIL.countRows(table1_restore));

    int ROWS_TO_ADD = 1_000;
    // different IDs so that rows don't overlap
    insertIntoTable(conn, table1, famName, 3, ROWS_TO_ADD);
    insertIntoTable(conn, table1, mobFam, 4, ROWS_TO_ADD);

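    // Split every region of table1 so the region layout at incremental-backup time differs from
    // the layout captured by the full backup.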
    List<HRegion> currentRegions = TEST_UTIL.getHBaseCluster().getRegions(table1);
    try (Admin admin = conn.getAdmin()) {
      for (HRegion region : currentRegions) {
        byte[] name = region.getRegionInfo().getEncodedNameAsBytes();
        admin.splitRegionAsync(name).get();
      }
    }

    TEST_UTIL.waitTableAvailable(table1);

    // Make sure we've split regions
    assertNotEquals(currentRegions, TEST_UTIL.getHBaseCluster().getRegions(table1));

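    // As in the restore test, fast-forward the clock past the snapshot TTL so the full backup's
    // snapshot is now expired.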
    EnvironmentEdgeManager.injectEdge(new EnvironmentEdge() {
      // time + 30s
      @Override
      public long currentTime() {
        return System.currentTimeMillis() + (30 * 1000);
      }
    });

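    // The incremental backup should fail, with SnapshotTTLExpiredException as the root cause.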
    IOException e = assertThrows(IOException.class, () -> {
      backupAdmin
        .backupTables(createBackupRequest(BackupType.INCREMENTAL, tables, BACKUP_ROOT_DIR));
    });
    assertTrue(e.getCause() instanceof SnapshotTTLExpiredException);

    EnvironmentEdgeManager.reset();
    backupAdmin.close();
  }

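  /** Recursively lists all files under {@code BACKUP_ROOT_DIR} on the test filesystem. */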
  private List<LocatedFileStatus> getBackupFiles() throws IOException {
    FileSystem fs = TEST_UTIL.getTestFileSystem();
    RemoteIterator<LocatedFileStatus> iter = fs.listFiles(new Path(BACKUP_ROOT_DIR), true);
    List<LocatedFileStatus> files = new ArrayList<>();

    while (iter.hasNext()) {
      files.add(iter.next());
    }

    return files;
  }
}