/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.backup.master;

import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertTrue;

import java.util.HashMap;
import java.util.List;
import java.util.Map;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.hbase.HBaseClassTestRule;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.backup.BackupType;
import org.apache.hadoop.hbase.backup.TestBackupBase;
import org.apache.hadoop.hbase.backup.impl.BackupSystemTable;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.master.HMaster;
import org.apache.hadoop.hbase.testclassification.LargeTests;
import org.apache.hadoop.hbase.util.Bytes;
import org.junit.ClassRule;
import org.junit.Test;
import org.junit.experimental.categories.Category;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import org.apache.hbase.thirdparty.com.google.common.collect.Iterables;
import org.apache.hbase.thirdparty.com.google.common.collect.Lists;

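/**
 * Verifies the behavior of {@link BackupLogCleaner}: while no backup sessions are recorded,
 * all WAL files are reported as deletable, and WAL files written before a successful full or
 * incremental backup become deletable once that backup completes.
 */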
@Category(LargeTests.class)
public class TestBackupLogCleaner extends TestBackupBase {

  @ClassRule
  public static final HBaseClassTestRule CLASS_RULE =
      HBaseClassTestRule.forClass(TestBackupLogCleaner.class);

  private static final Logger LOG = LoggerFactory.getLogger(TestBackupLogCleaner.class);

  // All cases are covered in a single test method because an incremental backup
  // depends on a preceding full backup.

  @Test
  public void testBackupLogCleaner() throws Exception {

    // #1 - create a full backup of all tables
    LOG.info("create full backup image for all tables");

    List<TableName> tableSetFullList = Lists.newArrayList(table1, table2, table3, table4);

    try (BackupSystemTable systemTable = new BackupSystemTable(TEST_UTIL.getConnection())) {
      // Verify that we have no backup sessions yet
      assertFalse(systemTable.hasBackupSessions());

      List<FileStatus> walFiles = getListOfWALFiles(TEST_UTIL.getConfiguration());
      BackupLogCleaner cleaner = new BackupLogCleaner();
      cleaner.setConf(TEST_UTIL.getConfiguration());
      Map<String, Object> params = new HashMap<>();
      params.put(HMaster.MASTER, TEST_UTIL.getHBaseCluster().getMaster());
      cleaner.init(params);

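      // BackupLogCleaner is a master log-cleaner plugin: it uses the master handle passed to
      // init() to read backup metadata from the backup system table and decide which WAL files
      // are still needed.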
      Iterable<FileStatus> deletable = cleaner.getDeletableFiles(walFiles);
      int size = Iterables.size(deletable);

      // All files are deletable because no backup sessions have been recorded yet
      assertEquals(walFiles.size(), size);

      String backupIdFull = fullTableBackup(tableSetFullList);
      assertTrue(checkSucceeded(backupIdFull));
      // Check one more time
      deletable = cleaner.getDeletableFiles(walFiles);
      // The WAL files are deletable because they have been recorded in the backup system table
      size = Iterables.size(deletable);
      assertEquals(walFiles.size(), size);

      List<FileStatus> newWalFiles = getListOfWALFiles(TEST_UTIL.getConfiguration());
      LOG.debug("WAL list after full backup");

      // The new list of WAL files is longer than the previous one because a new WAL has been
      // rolled on every RegionServer after the full backup
      assertTrue(walFiles.size() < newWalFiles.size());
      Connection conn = ConnectionFactory.createConnection(conf1);
      // #2 - insert some data into the tables
      Table t1 = conn.getTable(table1);
      Put p1;
      for (int i = 0; i < NB_ROWS_IN_BATCH; i++) {
        p1 = new Put(Bytes.toBytes("row-t1" + i));
        p1.addColumn(famName, qualName, Bytes.toBytes("val" + i));
        t1.put(p1);
      }

      t1.close();

      Table t2 = conn.getTable(table2);
      Put p2;
      for (int i = 0; i < 5; i++) {
        p2 = new Put(Bytes.toBytes("row-t2" + i));
        p2.addColumn(famName, qualName, Bytes.toBytes("val" + i));
        t2.put(p2);
      }

      t2.close();

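      // The puts above generated new WAL edits; once the incremental backup below succeeds,
      // those WALs are also recorded in the backup system table and become deletable.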
      // #3 - incremental backup of multiple tables

      List<TableName> tableSetIncList = Lists.newArrayList(table1, table2, table3);
      String backupIdIncMultiple = backupTables(BackupType.INCREMENTAL, tableSetIncList,
        BACKUP_ROOT_DIR);
      assertTrue(checkSucceeded(backupIdIncMultiple));
      deletable = cleaner.getDeletableFiles(newWalFiles);

      assertEquals(newWalFiles.size(), Iterables.size(deletable));

      conn.close();
    }
  }
}