/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.backup;

import java.io.IOException;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.Map.Entry;
import java.util.Objects;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.LocatedFileStatus;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.RemoteIterator;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HBaseTestingUtil;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.NamespaceDescriptor;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.backup.BackupInfo.BackupPhase;
import org.apache.hadoop.hbase.backup.BackupInfo.BackupState;
import org.apache.hadoop.hbase.backup.impl.BackupAdminImpl;
import org.apache.hadoop.hbase.backup.impl.BackupManager;
import org.apache.hadoop.hbase.backup.impl.BackupSystemTable;
import org.apache.hadoop.hbase.backup.impl.FullTableBackupClient;
import org.apache.hadoop.hbase.backup.impl.IncrementalBackupManager;
import org.apache.hadoop.hbase.backup.impl.IncrementalTableBackupClient;
import org.apache.hadoop.hbase.backup.master.LogRollMasterProcedureManager;
import org.apache.hadoop.hbase.backup.util.BackupUtils;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Durability;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.master.cleaner.LogCleaner;
import org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner;
import org.apache.hadoop.hbase.security.HadoopSecurityEnabledUserProviderForTesting;
import org.apache.hadoop.hbase.security.UserProvider;
import org.apache.hadoop.hbase.security.access.SecureTestUtil;
import org.apache.hadoop.hbase.snapshot.SnapshotTestingUtils;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.CommonFSUtils;
import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
import org.apache.hadoop.hbase.wal.AbstractFSWALProvider;
import org.apache.hadoop.hbase.wal.WALFactory;
import org.junit.AfterClass;
import org.junit.BeforeClass;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

/**
 * This class is only a base for other integration-level backup tests. Do not add tests here.
 * Tests that do not require bringing machines up/down should go into TestBackupSmallTests; all
 * other tests should have their own classes and extend this one.
 */
public class TestBackupBase {
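  // A minimal usage sketch (hypothetical subclass, not part of this file): concrete tests
  // typically extend this base and combine the helpers defined below, e.g.
  //
  //   public class TestFullBackupExample extends TestBackupBase {
  //     @Test
  //     public void testFullBackupSingleTable() throws Exception {
  //       String backupId = fullTableBackup(toList(table1.getNameAsString()));
  //       assertTrue(checkSucceeded(backupId));
  //     }
  //   }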
  private static final Logger LOG = LoggerFactory.getLogger(TestBackupBase.class);

  protected static HBaseTestingUtil TEST_UTIL;
  protected static HBaseTestingUtil TEST_UTIL2;
  protected static Configuration conf1;
  protected static Configuration conf2;

  protected static TableName table1 = TableName.valueOf("table1");
  protected static TableDescriptor table1Desc;
  protected static TableName table2 = TableName.valueOf("table2");
  protected static TableName table3 = TableName.valueOf("table3");
  protected static TableName table4 = TableName.valueOf("table4");

  protected static TableName table1_restore = TableName.valueOf("default:table1");
  protected static TableName table2_restore = TableName.valueOf("ns2:table2");
  protected static TableName table3_restore = TableName.valueOf("ns3:table3_restore");

  protected static final int NB_ROWS_IN_BATCH = 99;
  protected static final byte[] qualName = Bytes.toBytes("q1");
  protected static final byte[] famName = Bytes.toBytes("f");

  protected static String BACKUP_ROOT_DIR;
  protected static String BACKUP_REMOTE_ROOT_DIR;
  protected static String provider = "defaultProvider";
  protected static boolean secure = false;

  protected static boolean autoRestoreOnFailure;
  protected static boolean useSecondCluster;

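  // Test-only backup clients. Each mirrors the corresponding production execute() flow but calls
  // failStageIf() at every checkpoint, so a test can inject a failure at a chosen stage. The
  // stage to fail at is read from the configuration by failStageIf(); see TableBackupClient and
  // its Stage enum for the details.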
  static class IncrementalTableBackupClientForTest extends IncrementalTableBackupClient {
    public IncrementalTableBackupClientForTest() {
    }

    public IncrementalTableBackupClientForTest(Connection conn, String backupId,
      BackupRequest request) throws IOException {
      super(conn, backupId, request);
    }

    @Override
    public void execute() throws IOException {
      // case INCREMENTAL_COPY:
      try {
        // case PREPARE_INCREMENTAL:
        failStageIf(Stage.stage_0);
        beginBackup(backupManager, backupInfo);

        failStageIf(Stage.stage_1);
        backupInfo.setPhase(BackupPhase.PREPARE_INCREMENTAL);
        LOG.debug("For incremental backup, current table set is "
          + backupManager.getIncrementalBackupTableSet());
        newTimestamps = ((IncrementalBackupManager) backupManager).getIncrBackupLogFileMap();
        // copy out the table and region info files for each table
        BackupUtils.copyTableRegionInfo(conn, backupInfo, conf);
        // convert WAL to HFiles and copy them to .tmp under BACKUP_ROOT
        convertWALsToHFiles();
        incrementalCopyHFiles(new String[] { getBulkOutputDir().toString() },
          backupInfo.getBackupRootDir());
        failStageIf(Stage.stage_2);

        // case INCR_BACKUP_COMPLETE:
        // Set the overall backup status to complete. After this checkpoint the backup is allowed
        // to finish, even if a cancel request comes in afterwards.
        // Store the previousTimestampMap, i.e. the state before the current log roll, in the
        // manifest.
        Map<TableName, Map<String, Long>> previousTimestampMap =
          backupManager.readLogTimestampMap();
        backupInfo.setIncrTimestampMap(previousTimestampMap);

        // The table list in backupInfo is good for both full backup and incremental backup.
        // For incremental backup, it contains the incremental backup table set.
        backupManager.writeRegionServerLogTimestamp(backupInfo.getTables(), newTimestamps);
        failStageIf(Stage.stage_3);

        Map<TableName, Map<String, Long>> newTableSetTimestampMap =
          backupManager.readLogTimestampMap();

        Long newStartCode =
          BackupUtils.getMinValue(BackupUtils.getRSLogTimestampMins(newTableSetTimestampMap));
        backupManager.writeBackupStartCode(newStartCode);

        handleBulkLoad(backupInfo.getTableNames());
        failStageIf(Stage.stage_4);

        // backup complete
        completeBackup(conn, backupInfo, backupManager, BackupType.INCREMENTAL, conf);

      } catch (Exception e) {
        failBackup(conn, backupInfo, backupManager, e, "Unexpected Exception : ",
          BackupType.INCREMENTAL, conf);
        throw new IOException(e);
      }
    }
  }

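  // Full-backup counterpart of the client above: the same stage-based failure injection, plus
  // optional cleanup through failBackup() when autoRestoreOnFailure is set.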
  static class FullTableBackupClientForTest extends FullTableBackupClient {
    public FullTableBackupClientForTest() {
    }

    public FullTableBackupClientForTest(Connection conn, String backupId, BackupRequest request)
      throws IOException {
      super(conn, backupId, request);
    }

    @Override
    public void execute() throws IOException {
      // Get the stage ID to fail on
      try (Admin admin = conn.getAdmin()) {
        // Begin BACKUP
        beginBackup(backupManager, backupInfo);
        failStageIf(Stage.stage_0);
        String savedStartCode;
        boolean firstBackup;
        // do snapshot for full table backup
        savedStartCode = backupManager.readBackupStartCode();
        firstBackup = savedStartCode == null || Long.parseLong(savedStartCode) == 0L;
        if (firstBackup) {
          // This is our first backup. Write a marker to the system table so that we can hold on
          // to the logs while we do the backup.
          backupManager.writeBackupStartCode(0L);
        }
        failStageIf(Stage.stage_1);
        // We roll the logs here before we take the snapshot. It is possible that the log contains
        // duplicate data that is already in the snapshot, but if we rolled after the snapshot, we
        // could lose data.
        // A better approach would be to roll the logs on each RS in the same global procedure as
        // the snapshot.
        LOG.info("Execute roll log procedure for full backup ...");

        Map<String, String> props = new HashMap<>();
        props.put("backupRoot", backupInfo.getBackupRootDir());
        admin.execProcedure(LogRollMasterProcedureManager.ROLLLOG_PROCEDURE_SIGNATURE,
          LogRollMasterProcedureManager.ROLLLOG_PROCEDURE_NAME, props);
        failStageIf(Stage.stage_2);
        newTimestamps = backupManager.readRegionServerLastLogRollResult();

        // SNAPSHOT_TABLES:
        backupInfo.setPhase(BackupPhase.SNAPSHOT);
        for (TableName tableName : tableList) {
          String snapshotName = "snapshot_" + Long.toString(EnvironmentEdgeManager.currentTime())
            + "_" + tableName.getNamespaceAsString() + "_" + tableName.getQualifierAsString();

          snapshotTable(admin, tableName, snapshotName);
          backupInfo.setSnapshotName(tableName, snapshotName);
        }
        failStageIf(Stage.stage_3);
        // SNAPSHOT_COPY:
        // do snapshot copy
        LOG.debug("snapshot copy for " + backupId);
        snapshotCopy(backupInfo);
        // Updates incremental backup table set
        backupManager.addIncrementalBackupTableSet(backupInfo.getTables());

        // BACKUP_COMPLETE:
        // Set the overall backup status to complete. After this checkpoint the backup is allowed
        // to finish, even if a cancel request comes in afterwards.
        backupInfo.setState(BackupState.COMPLETE);
        // The table list in backupInfo is good for both full backup and incremental backup.
        // For incremental backup, it contains the incremental backup table set.
        backupManager.writeRegionServerLogTimestamp(backupInfo.getTables(), newTimestamps);

        Map<TableName, Map<String, Long>> newTableSetTimestampMap =
          backupManager.readLogTimestampMap();

        Long newStartCode =
          BackupUtils.getMinValue(BackupUtils.getRSLogTimestampMins(newTableSetTimestampMap));
        backupManager.writeBackupStartCode(newStartCode);
        failStageIf(Stage.stage_4);
        // backup complete
        completeBackup(conn, backupInfo, backupManager, BackupType.FULL, conf);

      } catch (Exception e) {
        if (autoRestoreOnFailure) {
          failBackup(conn, backupInfo, backupManager, e, "Unexpected BackupException : ",
            BackupType.FULL, conf);
        }
        throw new IOException(e);
      }
    }
  }

  public static void setUpHelper() throws Exception {
    BACKUP_ROOT_DIR = Path.SEPARATOR + "backupUT";
    BACKUP_REMOTE_ROOT_DIR = Path.SEPARATOR + "backupUT";

    if (secure) {
      // set the always-on security provider
      UserProvider.setUserProviderForTesting(TEST_UTIL.getConfiguration(),
        HadoopSecurityEnabledUserProviderForTesting.class);
      // setup configuration
      SecureTestUtil.enableSecurity(TEST_UTIL.getConfiguration());
    }
    conf1.setBoolean(BackupRestoreConstants.BACKUP_ENABLE_KEY, true);
    BackupManager.decorateMasterConfiguration(conf1);
    BackupManager.decorateRegionServerConfiguration(conf1);
    conf1.set(HConstants.ZOOKEEPER_ZNODE_PARENT, "/1");
    // Set the TTL for old WALs to 1 sec to enforce fast cleaning of archived WAL files
    conf1.setLong(TimeToLiveLogCleaner.TTL_CONF_KEY, 1000);
    conf1.setLong(LogCleaner.OLD_WALS_CLEANER_THREAD_TIMEOUT_MSEC, 1000);

    // Set the WAL provider; subclasses may set this to "multiwal" (two WAL files per RS by
    // default) before calling setUpHelper()
    conf1.set(WALFactory.WAL_PROVIDER, provider);
    TEST_UTIL.startMiniCluster();

    if (useSecondCluster) {
      conf2 = HBaseConfiguration.create(conf1);
      conf2.set(HConstants.ZOOKEEPER_ZNODE_PARENT, "/2");
      TEST_UTIL2 = new HBaseTestingUtil(conf2);
      TEST_UTIL2.setZkCluster(TEST_UTIL.getZkCluster());
      TEST_UTIL2.startMiniDFSCluster(3);
      String root2 = TEST_UTIL2.getConfiguration().get("fs.defaultFS");
      Path p = new Path(new Path(root2), "/tmp/wal");
      CommonFSUtils.setWALRootDir(TEST_UTIL2.getConfiguration(), p);
      TEST_UTIL2.startMiniCluster();
    }
    conf1 = TEST_UTIL.getConfiguration();

    TEST_UTIL.startMiniMapReduceCluster();
    BACKUP_ROOT_DIR =
      new Path(new Path(TEST_UTIL.getConfiguration().get("fs.defaultFS")), BACKUP_ROOT_DIR)
        .toString();
    LOG.info("ROOTDIR " + BACKUP_ROOT_DIR);
    if (useSecondCluster) {
      BACKUP_REMOTE_ROOT_DIR = new Path(
        new Path(TEST_UTIL2.getConfiguration().get("fs.defaultFS")) + BACKUP_REMOTE_ROOT_DIR)
          .toString();
      LOG.info("REMOTE ROOTDIR " + BACKUP_REMOTE_ROOT_DIR);
    }
    createTables();
    populateFromMasterConfig(TEST_UTIL.getHBaseCluster().getMaster().getConfiguration(), conf1);
  }

  /**
   * Set up the cluster with the appropriate configurations before running tests.
   * @throws Exception if starting the mini cluster or setting up the tables fails
   */
  @BeforeClass
  public static void setUp() throws Exception {
    TEST_UTIL = new HBaseTestingUtil();
    conf1 = TEST_UTIL.getConfiguration();
    autoRestoreOnFailure = true;
    useSecondCluster = false;
    setUpHelper();
  }
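  // Hypothetical sketch (not part of this file) of how a subclass changes the cluster topology:
  // shadow the @BeforeClass above with its own static setUp(), flip the relevant flags, then
  // call setUpHelper(), e.g.
  //
  //   @BeforeClass
  //   public static void setUp() throws Exception {
  //     TEST_UTIL = new HBaseTestingUtil();
  //     conf1 = TEST_UTIL.getConfiguration();
  //     useSecondCluster = true; // also bring up the second (remote) cluster
  //     setUpHelper();
  //   }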

  private static void populateFromMasterConfig(Configuration masterConf, Configuration conf) {
    Iterator<Entry<String, String>> it = masterConf.iterator();
    while (it.hasNext()) {
      Entry<String, String> e = it.next();
      conf.set(e.getKey(), e.getValue());
    }
  }

  @AfterClass
  public static void tearDown() throws Exception {
    try {
      SnapshotTestingUtils.deleteAllSnapshots(TEST_UTIL.getAdmin());
    } catch (Exception e) {
      // ignore: snapshot cleanup is best-effort
    }
    SnapshotTestingUtils.deleteArchiveDirectory(TEST_UTIL);
    if (useSecondCluster) {
      TEST_UTIL2.shutdownMiniCluster();
    }
    TEST_UTIL.shutdownMiniCluster();
    TEST_UTIL.shutdownMiniMapReduceCluster();
    autoRestoreOnFailure = true;
    useSecondCluster = false;
  }

  Table insertIntoTable(Connection conn, TableName table, byte[] family, int id, int numRows)
    throws IOException {
    Table t = conn.getTable(table);
    for (int i = 0; i < numRows; i++) {
      Put p1 = new Put(Bytes.toBytes("row-" + table + "-" + id + "-" + i));
      p1.addColumn(family, qualName, Bytes.toBytes("val" + i));
      t.put(p1);
    }
    return t;
  }

  protected BackupRequest createBackupRequest(BackupType type, List<TableName> tables,
    String path) {
    BackupRequest.Builder builder = new BackupRequest.Builder();
    return builder.withBackupType(type).withTableList(tables).withTargetRootDir(path).build();
  }

  protected String backupTables(BackupType type, List<TableName> tables, String path)
    throws IOException {
    // try-with-resources closes badmin before conn, matching the original finally-block order
    try (Connection conn = ConnectionFactory.createConnection(conf1);
      BackupAdmin badmin = new BackupAdminImpl(conn)) {
      BackupRequest request = createBackupRequest(type, tables, path);
      return badmin.backupTables(request);
    }
  }

  protected String fullTableBackup(List<TableName> tables) throws IOException {
    return backupTables(BackupType.FULL, tables, BACKUP_ROOT_DIR);
  }

  protected String incrementalTableBackup(List<TableName> tables) throws IOException {
    return backupTables(BackupType.INCREMENTAL, tables, BACKUP_ROOT_DIR);
  }

  protected static void loadTable(Table table) throws Exception {
    // load NB_ROWS_IN_BATCH rows into the table, skipping the WAL
    for (int i = 0; i < NB_ROWS_IN_BATCH; i++) {
      Put p = new Put(Bytes.toBytes("row" + i));
      p.setDurability(Durability.SKIP_WAL);
      p.addColumn(famName, qualName, Bytes.toBytes("val" + i));
      table.put(p);
    }
  }

  protected static void createTables() throws Exception {
    long tid = EnvironmentEdgeManager.currentTime();
    table1 = TableName.valueOf("test-" + tid);
    Admin ha = TEST_UTIL.getAdmin();

    // Create namespaces
    ha.createNamespace(NamespaceDescriptor.create("ns1").build());
    ha.createNamespace(NamespaceDescriptor.create("ns2").build());
    ha.createNamespace(NamespaceDescriptor.create("ns3").build());
    ha.createNamespace(NamespaceDescriptor.create("ns4").build());

    TableDescriptor desc = TableDescriptorBuilder.newBuilder(table1)
      .setColumnFamily(ColumnFamilyDescriptorBuilder.of(famName)).build();
    ha.createTable(desc);
    table1Desc = desc;
    Connection conn = ConnectionFactory.createConnection(conf1);
    Table table = conn.getTable(table1);
    loadTable(table);
    table.close();
    // parenthesize (tid + N) so the offset is added numerically rather than concatenated
    table2 = TableName.valueOf("ns2:test-" + (tid + 1));
    desc = TableDescriptorBuilder.newBuilder(table2)
      .setColumnFamily(ColumnFamilyDescriptorBuilder.of(famName)).build();
    ha.createTable(desc);
    table = conn.getTable(table2);
    loadTable(table);
    table.close();
    table3 = TableName.valueOf("ns3:test-" + (tid + 2));
    table = TEST_UTIL.createTable(table3, famName);
    table.close();
    table4 = TableName.valueOf("ns4:test-" + (tid + 3));
    table = TEST_UTIL.createTable(table4, famName);
    table.close();
    ha.close();
    conn.close();
  }

  protected boolean checkSucceeded(String backupId) throws IOException {
    BackupInfo status = getBackupInfo(backupId);
    if (status == null) {
      return false;
    }
    return status.getState() == BackupState.COMPLETE;
  }

  protected boolean checkFailed(String backupId) throws IOException {
    BackupInfo status = getBackupInfo(backupId);
    if (status == null) {
      return false;
    }
    return status.getState() == BackupState.FAILED;
  }

  private BackupInfo getBackupInfo(String backupId) throws IOException {
    try (BackupSystemTable table = new BackupSystemTable(TEST_UTIL.getConnection())) {
      return table.readBackupInfo(backupId);
    }
  }

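  // Note: the returned BackupAdmin wraps the shared TEST_UTIL connection; callers are expected
  // to close the admin when they are done with it.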
  protected BackupAdmin getBackupAdmin() throws IOException {
    return new BackupAdminImpl(TEST_UTIL.getConnection());
  }

  /**
   * Converts the given table name strings into a list of {@link TableName} instances.
   */
  protected List<TableName> toList(String... args) {
    List<TableName> ret = new ArrayList<>();
    for (String arg : args) {
      ret.add(TableName.valueOf(arg));
    }
    return ret;
  }

  protected List<FileStatus> getListOfWALFiles(Configuration c) throws IOException {
    Path logRoot = new Path(CommonFSUtils.getWALRootDir(c), HConstants.HREGION_LOGDIR_NAME);
    FileSystem fs = logRoot.getFileSystem(c);
    RemoteIterator<LocatedFileStatus> it = fs.listFiles(logRoot, true);
    List<FileStatus> logFiles = new ArrayList<>();
    while (it.hasNext()) {
      LocatedFileStatus lfs = it.next();
      if (lfs.isFile() && !AbstractFSWALProvider.isMetaFile(lfs.getPath())) {
        logFiles.add(lfs);
        LOG.info(Objects.toString(lfs));
      }
    }
    return logFiles;
  }

  protected void dumpBackupDir() throws IOException {
    // Dump Backup Dir
    FileSystem fs = FileSystem.get(conf1);
    RemoteIterator<LocatedFileStatus> it = fs.listFiles(new Path(BACKUP_ROOT_DIR), true);
    while (it.hasNext()) {
      LOG.debug(Objects.toString(it.next().getPath()));
    }
  }
}