/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.backup.master;

import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertTrue;

import java.util.HashMap;
import java.util.List;
import java.util.Map;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.hbase.HBaseClassTestRule;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.backup.BackupType;
import org.apache.hadoop.hbase.backup.TestBackupBase;
import org.apache.hadoop.hbase.backup.impl.BackupSystemTable;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.master.HMaster;
import org.apache.hadoop.hbase.testclassification.LargeTests;
import org.apache.hadoop.hbase.util.Bytes;
import org.junit.ClassRule;
import org.junit.Test;
import org.junit.experimental.categories.Category;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import org.apache.hbase.thirdparty.com.google.common.collect.Iterables;
import org.apache.hbase.thirdparty.com.google.common.collect.Lists;
@Category(LargeTests.class)
public class TestBackupLogCleaner extends TestBackupBase {

  @ClassRule
  public static final HBaseClassTestRule CLASS_RULE =
    HBaseClassTestRule.forClass(TestBackupLogCleaner.class);

  private static final Logger LOG = LoggerFactory.getLogger(TestBackupLogCleaner.class);

  // All scenarios are implemented as a single test case, because the full backup and
  // incremental backup steps depend on each other

  @Test
  public void testBackupLogCleaner() throws Exception {

    // #1 - create full backup for all tables
    LOG.info("create full backup image for all tables");

    List<TableName> tableSetFullList = Lists.newArrayList(table1, table2, table3, table4);
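    // Note: table4 is part of the full backup set but is left out of the incremental
    // backup set used later in this test.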

    try (BackupSystemTable systemTable = new BackupSystemTable(TEST_UTIL.getConnection())) {
      // Verify that we have no backup sessions yet
      assertFalse(systemTable.hasBackupSessions());

      List<FileStatus> walFiles = getListOfWALFiles(TEST_UTIL.getConfiguration());
      BackupLogCleaner cleaner = new BackupLogCleaner();
      cleaner.setConf(TEST_UTIL.getConfiguration());
      Map<String, Object> params = new HashMap<>();
      params.put(HMaster.MASTER, TEST_UTIL.getHBaseCluster().getMaster());
      cleaner.init(params);
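
      // Once initialized, the cleaner is consulted by the master's log cleaner chore:
      // it reports an archived WAL as deletable only when no future incremental backup
      // still needs it.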
      Iterable<FileStatus> deletable = cleaner.getDeletableFiles(walFiles);
      // We can delete all files because no backup sessions have been recorded yet
      assertEquals(walFiles.size(), Iterables.size(deletable));
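
      // Run a full backup of all four tables; the backup procedure rolls the WAL on
      // each region server and records the roll timestamps in the backup system table.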
      String backupIdFull = fullTableBackup(tableSetFullList);
      assertTrue(checkSucceeded(backupIdFull));

      // Check one more time
      deletable = cleaner.getDeletableFiles(walFiles);
      // We can still delete all the WAL files because they predate the roll timestamps
      // recorded in the backup system table
      assertEquals(walFiles.size(), Iterables.size(deletable));

      List<FileStatus> newWalFiles = getListOfWALFiles(TEST_UTIL.getConfiguration());
      LOG.debug("WAL list after full backup");

      // The new list of WAL files is longer than the previous one, because a new WAL
      // was rolled on each region server as part of the full backup
      assertTrue(walFiles.size() < newWalFiles.size());

      // #2 - insert some data into the tables
      try (Connection conn = ConnectionFactory.createConnection(conf1)) {
        try (Table t1 = conn.getTable(table1)) {
          for (int i = 0; i < NB_ROWS_IN_BATCH; i++) {
            Put p1 = new Put(Bytes.toBytes("row-t1" + i));
            p1.addColumn(famName, qualName, Bytes.toBytes("val" + i));
            t1.put(p1);
          }
        }

        try (Table t2 = conn.getTable(table2)) {
          for (int i = 0; i < 5; i++) {
            Put p2 = new Put(Bytes.toBytes("row-t2" + i));
            p2.addColumn(famName, qualName, Bytes.toBytes("val" + i));
            t2.put(p2);
          }
        }

        // #3 - incremental backup for multiple tables
        List<TableName> tableSetIncList = Lists.newArrayList(table1, table2, table3);
        String backupIdIncMultiple =
          backupTables(BackupType.INCREMENTAL, tableSetIncList, BACKUP_ROOT_DIR);
        assertTrue(checkSucceeded(backupIdIncMultiple));
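
        // The incremental backup consumed the WALs written since the full backup, so
        // the cleaner should now report every file in the new WAL list as deletable too.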
        deletable = cleaner.getDeletableFiles(newWalFiles);
        assertEquals(newWalFiles.size(), Iterables.size(deletable));
      }
    }
  }
}