/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.backup;

import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertNull;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;

import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import java.util.Optional;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseClassTestRule;
import org.apache.hadoop.hbase.HBaseTestingUtil;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.backup.impl.BackupSystemTable;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.SnapshotDescription;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.coprocessor.CoprocessorHost;
import org.apache.hadoop.hbase.coprocessor.MasterCoprocessor;
import org.apache.hadoop.hbase.coprocessor.MasterCoprocessorEnvironment;
import org.apache.hadoop.hbase.coprocessor.MasterObserver;
import org.apache.hadoop.hbase.coprocessor.ObserverContext;
import org.apache.hadoop.hbase.testclassification.LargeTests;
import org.apache.hadoop.util.ToolRunner;
import org.junit.BeforeClass;
import org.junit.ClassRule;
import org.junit.Test;
import org.junit.experimental.categories.Category;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import org.apache.hbase.thirdparty.com.google.common.collect.Lists;

/**
 * Tests the backup delete command when failures are injected into the master snapshot lifecycle
 * via a {@link MasterObserver} coprocessor, and verifies that the backup repair command restores
 * the backup system table to a consistent state afterwards.
 */
@Category(LargeTests.class)
public class TestBackupDeleteWithFailures extends TestBackupBase {

  @ClassRule
  public static final HBaseClassTestRule CLASS_RULE =
      HBaseClassTestRule.forClass(TestBackupDeleteWithFailures.class);

  private static final Logger LOG = LoggerFactory.getLogger(TestBackupDeleteWithFailures.class);

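  /**
   * Snapshot lifecycle hooks at which a failure can be injected by
   * {@link MasterSnapshotObserver}.
   */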
  public enum Failure {
    NO_FAILURES,
    PRE_SNAPSHOT_FAILURE,
    PRE_DELETE_SNAPSHOT_FAILURE,
    POST_DELETE_SNAPSHOT_FAILURE
  }

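  /**
   * Master coprocessor that throws an {@link IOException} from selected snapshot hooks to
   * simulate master-side failures. It is installed on the master in
   * {@link TestBackupDeleteWithFailures#setUp()} via the standard coprocessor configuration key:
   * <pre>{@code
   * conf1.set(CoprocessorHost.MASTER_COPROCESSOR_CONF_KEY,
   *   MasterSnapshotObserver.class.getName());
   * }</pre>
   */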
  public static class MasterSnapshotObserver implements MasterCoprocessor, MasterObserver {
    List<Failure> failures = new ArrayList<>();

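    /**
     * Replaces the set of failures to inject on subsequent snapshot operations.
     */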
    public void setFailures(Failure... f) {
      failures.clear();
      for (Failure failure : f) {
        failures.add(failure);
      }
    }

    @Override
    public Optional<MasterObserver> getMasterObserver() {
      return Optional.of(this);
    }

    @Override
    public void preSnapshot(final ObserverContext<MasterCoprocessorEnvironment> ctx,
        final SnapshotDescription snapshot, final TableDescriptor hTableDescriptor)
        throws IOException {
      if (failures.contains(Failure.PRE_SNAPSHOT_FAILURE)) {
        throw new IOException("preSnapshot");
      }
    }

    @Override
    public void preDeleteSnapshot(ObserverContext<MasterCoprocessorEnvironment> ctx,
        SnapshotDescription snapshot) throws IOException {
      if (failures.contains(Failure.PRE_DELETE_SNAPSHOT_FAILURE)) {
        throw new IOException("preDeleteSnapshot");
      }
    }

    @Override
    public void postDeleteSnapshot(ObserverContext<MasterCoprocessorEnvironment> ctx,
        SnapshotDescription snapshot) throws IOException {
      if (failures.contains(Failure.POST_DELETE_SNAPSHOT_FAILURE)) {
        throw new IOException("postDeleteSnapshot");
      }
    }
  }

  /**
   * Sets up the mini cluster with the snapshot observer coprocessor installed before running
   * tests.
   * @throws Exception if starting the mini cluster or setting up the tables fails
   */
  @BeforeClass
  public static void setUp() throws Exception {
    TEST_UTIL = new HBaseTestingUtil();
    conf1 = TEST_UTIL.getConfiguration();
    conf1.set(CoprocessorHost.MASTER_COPROCESSOR_CONF_KEY,
      MasterSnapshotObserver.class.getName());
    conf1.setInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER, 1);
    setUpHelper();
  }

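  /**
   * Returns the {@link MasterSnapshotObserver} instance loaded on the active master.
   */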
  private MasterSnapshotObserver getMasterSnapshotObserver() {
    return TEST_UTIL.getHBaseCluster().getMaster().getMasterCoprocessorHost()
        .findCoprocessor(MasterSnapshotObserver.class);
  }

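  /**
   * Exercises the delete-with-failure scenario for each injection point. Failures injected
   * before the backup data is removed (pre-snapshot, pre-delete-snapshot) are expected to leave
   * the backup in the history, while a post-delete-snapshot failure still removes it.
   */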
  @Test
  public void testBackupDeleteWithFailures() throws Exception {
    testBackupDeleteWithFailuresAfter(1, Failure.PRE_DELETE_SNAPSHOT_FAILURE);
    testBackupDeleteWithFailuresAfter(0, Failure.POST_DELETE_SNAPSHOT_FAILURE);
    testBackupDeleteWithFailuresAfter(1, Failure.PRE_SNAPSHOT_FAILURE);
  }

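  /**
   * Performs a full backup of table1, injects the given failures, attempts to delete the backup,
   * verifies the resulting state of the backup system table, and finally runs the backup repair
   * tool to restore consistency.
   * @param expected expected backup history size after the (possibly failed) delete
   * @param failures failure points to inject via {@link MasterSnapshotObserver}
   */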
  private void testBackupDeleteWithFailuresAfter(int expected, Failure... failures)
      throws Exception {
    LOG.info("Test repair backup delete on a single table with data and failures {}", failures[0]);
    List<TableName> tableList = Lists.newArrayList(table1);
    String backupId = fullTableBackup(tableList);
    assertTrue(checkSucceeded(backupId));
    LOG.info("backup complete");
    String[] backupIds = new String[] { backupId };
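    // Sanity check: the backup directory must exist under the backup root before the delete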
    BackupSystemTable table = new BackupSystemTable(TEST_UTIL.getConnection());
    BackupInfo info = table.readBackupInfo(backupId);
    Path path = new Path(info.getBackupRootDir(), backupId);
    FileSystem fs = FileSystem.get(path.toUri(), conf1);
    assertTrue(fs.exists(path));

    Connection conn = TEST_UTIL.getConnection();
    Admin admin = conn.getAdmin();
    MasterSnapshotObserver observer = getMasterSnapshotObserver();

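    // Inject the requested failures; the delete is expected to throw only when the failure
    // occurs before the backup data is removed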
    observer.setFailures(failures);
    try {
      getBackupAdmin().deleteBackups(backupIds);
    } catch (IOException e) {
      if (expected != 1) {
        fail("Delete backup should not have failed: " + e.getMessage());
      }
    }

    // Verify that the backup history size matches the expected value after the delete failure
    assertEquals(expected, table.getBackupHistory().size());

    String[] ids = table.getListOfBackupIdsFromDeleteOperation();

    // Verify that the delete record is still present in the backup system table after a failed
    // delete, and absent otherwise
    if (expected == 1) {
      assertEquals(1, ids.length);
      assertEquals(backupId, ids[0]);
    } else {
      assertNull(ids);
    }

    // Now run the repair command to repair the "failed" delete operation
    String[] args = new String[] { "repair" };

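    // Disable failure injection so that the repair can complete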
    observer.setFailures(Failure.NO_FAILURES);

    // Run repair
    int ret = ToolRunner.run(conf1, new BackupDriver(), args);
    assertEquals(0, ret);
    // Verify that the backup history is now empty
    assertEquals(0, table.getBackupHistory().size());
    ids = table.getListOfBackupIdsFromDeleteOperation();

    // Verify that the delete record is gone from the backup system table
    assertNull(ids);

    table.close();
    admin.close();
  }
}