/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.backup.impl;

import static org.apache.hadoop.hbase.backup.BackupRestoreConstants.JOB_NAME_CONF_KEY;

import java.io.IOException;
import java.net.URI;
import java.net.URISyntaxException;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Set;
import org.apache.commons.io.FilenameUtils;
import org.apache.commons.lang3.StringUtils;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.LocatedFileStatus;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.RemoteIterator;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.backup.BackupCopyJob;
import org.apache.hadoop.hbase.backup.BackupInfo;
import org.apache.hadoop.hbase.backup.BackupInfo.BackupPhase;
import org.apache.hadoop.hbase.backup.BackupRequest;
import org.apache.hadoop.hbase.backup.BackupRestoreFactory;
import org.apache.hadoop.hbase.backup.BackupType;
import org.apache.hadoop.hbase.backup.HBackupFileSystem;
import org.apache.hadoop.hbase.backup.mapreduce.MapReduceBackupCopyJob;
import org.apache.hadoop.hbase.backup.mapreduce.MapReduceHFileSplitterJob;
import org.apache.hadoop.hbase.backup.util.BackupUtils;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.io.hfile.HFile;
import org.apache.hadoop.hbase.mapreduce.WALPlayer;
import org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils;
import org.apache.hadoop.hbase.snapshot.SnapshotManifest;
import org.apache.hadoop.hbase.snapshot.SnapshotRegionLocator;
import org.apache.hadoop.hbase.snapshot.SnapshotTTLExpiredException;
import org.apache.hadoop.hbase.util.CommonFSUtils;
import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
import org.apache.hadoop.hbase.util.HFileArchiveUtil;
import org.apache.hadoop.hbase.wal.AbstractFSWALProvider;
import org.apache.hadoop.util.Tool;
import org.apache.yetus.audience.InterfaceAudience;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import org.apache.hbase.thirdparty.com.google.common.collect.Lists;

import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
import org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos;

/**
 * Incremental backup implementation. See the {@link #execute() execute} method.
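 * <p>
 * An incremental backup proceeds in three phases: PREPARE_INCREMENTAL (determine the WAL files
 * written since the previous backup), INCREMENTAL_COPY (convert those WALs to HFiles and copy them
 * to the backup destination) and INCR_BACKUP_COMPLETE (persist the new log timestamps, handle bulk
 * loaded files and write the manifest). Callers normally do not use this class directly; an
 * incremental backup is typically requested through the {@code BackupAdmin} API, roughly as in the
 * following sketch (table list and target root dir are placeholders):
 *
 * <pre>
 * try (Connection conn = ConnectionFactory.createConnection(conf);
 *   BackupAdmin admin = new BackupAdminImpl(conn)) {
 *   BackupRequest request = new BackupRequest.Builder().withBackupType(BackupType.INCREMENTAL)
 *     .withTableList(tables).withTargetRootDir(backupRootDir).build();
 *   String backupId = admin.backupTables(request);
 * }
 * </pre>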
 */
@InterfaceAudience.Private
public class IncrementalTableBackupClient extends TableBackupClient {
  private static final Logger LOG = LoggerFactory.getLogger(IncrementalTableBackupClient.class);

  protected IncrementalTableBackupClient() {
  }

  public IncrementalTableBackupClient(final Connection conn, final String backupId,
    BackupRequest request) throws IOException {
    super(conn, backupId, request);
  }

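  /**
   * Filters out WAL files that can no longer be found on the file system. A file that is missing
   * has typically already been copied by a previous backup, so it can safely be skipped here.
   * @param incrBackupFileList candidate WAL file paths
   * @return the subset of paths that still exist or belong to the active WAL directory
   */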
  protected List<String> filterMissingFiles(List<String> incrBackupFileList) throws IOException {
    List<String> list = new ArrayList<>();
    for (String file : incrBackupFileList) {
      Path p = new Path(file);
      if (fs.exists(p) || isActiveWalPath(p)) {
        list.add(file);
      } else {
        LOG.warn("Can't find file: " + file);
      }
    }
    return list;
  }

  /**
   * Checks whether a given path belongs to the active WAL directory, i.e. it has not yet been moved
   * to the WAL archive.
   * @param p path to check
   * @return true if the path points to an active (non-archived) WAL file
   */
  protected boolean isActiveWalPath(Path p) {
    return !AbstractFSWALProvider.isArchivedLogFile(p);
  }

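  /**
   * Returns the position of {@code tbl} in {@code sTableList}, -1 if the table is not in the list,
   * or 0 if the list is {@code null}.
   */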
  protected static int getIndex(TableName tbl, List<TableName> sTableList) {
    if (sTableList == null) {
      return 0;
    }

    for (int i = 0; i < sTableList.size(); i++) {
      if (tbl.equals(sTableList.get(i))) {
        return i;
      }
    }
    return -1;
  }

  /**
   * Reads bulk load records from the backup system table, iterates through the records and forms
   * the paths of the bulk loaded hfiles. Copies the bulk loaded hfiles to the backup destination.
   * This method does NOT clean up the entries in the bulk load system table; those entries should
   * not be cleaned until the backup is marked as complete.
   * @param tablesToBackup list of tables to be backed up
   * @return the bulk load records that were read for the given tables
   */
  protected List<BulkLoad> handleBulkLoad(List<TableName> tablesToBackup) throws IOException {
    Map<TableName, MergeSplitBulkloadInfo> toBulkload = new HashMap<>();
    List<BulkLoad> bulkLoads = backupManager.readBulkloadRows(tablesToBackup);
    FileSystem tgtFs;
    try {
      tgtFs = FileSystem.get(new URI(backupInfo.getBackupRootDir()), conf);
    } catch (URISyntaxException use) {
      throw new IOException("Unable to get FileSystem", use);
    }
    Path rootdir = CommonFSUtils.getRootDir(conf);
    Path tgtRoot = new Path(new Path(backupInfo.getBackupRootDir()), backupId);

    for (BulkLoad bulkLoad : bulkLoads) {
      TableName srcTable = bulkLoad.getTableName();
      MergeSplitBulkloadInfo bulkloadInfo =
        toBulkload.computeIfAbsent(srcTable, MergeSplitBulkloadInfo::new);
      String regionName = bulkLoad.getRegion();
      String fam = bulkLoad.getColumnFamily();
      String filename = FilenameUtils.getName(bulkLoad.getHfilePath());

      if (!tablesToBackup.contains(srcTable)) {
        LOG.debug("Skipping {} since it is not in tablesToBackup", srcTable);
        continue;
      }
      Path tblDir = CommonFSUtils.getTableDir(rootdir, srcTable);
      Path p = new Path(tblDir, regionName + Path.SEPARATOR + fam + Path.SEPARATOR + filename);

      String srcTableQualifier = srcTable.getQualifierAsString();
      String srcTableNs = srcTable.getNamespaceAsString();
      Path tgtFam = new Path(tgtRoot, srcTableNs + Path.SEPARATOR + srcTableQualifier
        + Path.SEPARATOR + regionName + Path.SEPARATOR + fam);
      if (!tgtFs.mkdirs(tgtFam)) {
        throw new IOException("couldn't create " + tgtFam);
      }
      Path tgt = new Path(tgtFam, filename);

      Path archiveDir = HFileArchiveUtil.getStoreArchivePath(conf, srcTable, regionName, fam);
      Path archive = new Path(archiveDir, filename);

      if (fs.exists(p)) {
        if (LOG.isTraceEnabled()) {
          LOG.trace("found bulk hfile {} in {} for {}", bulkLoad.getHfilePath(), p.getParent(),
            srcTableQualifier);
          LOG.trace("copying {} to {}", p, tgt);
        }
        bulkloadInfo.addActiveFile(p.toString());
      } else if (fs.exists(archive)) {
        LOG.debug("copying archive {} to {}", archive, tgt);
        bulkloadInfo.addArchiveFiles(archive.toString());
      }
    }

    for (MergeSplitBulkloadInfo bulkloadInfo : toBulkload.values()) {
      mergeSplitAndCopyBulkloadedHFiles(bulkloadInfo.getActiveFiles(),
        bulkloadInfo.getArchiveFiles(), bulkloadInfo.getSrcTable(), tgtFs);
    }

    return bulkLoads;
  }

  private void mergeSplitAndCopyBulkloadedHFiles(List<String> activeFiles,
    List<String> archiveFiles, TableName tn, FileSystem tgtFs) throws IOException {
    int attempt = 1;

    while (!activeFiles.isEmpty()) {
      LOG.info("MergeSplit {} active bulk loaded files. Attempt={}", activeFiles.size(), attempt++);
      // An active file can be archived during the copy operation,
      // so we need to handle that case properly
      try {
        mergeSplitAndCopyBulkloadedHFiles(activeFiles, tn, tgtFs);
        break;
      } catch (IOException e) {
        int numActiveFiles = activeFiles.size();
        updateFileLists(activeFiles, archiveFiles);
        if (activeFiles.size() < numActiveFiles) {
          continue;
        }

        throw e;
      }
    }

    if (!archiveFiles.isEmpty()) {
      mergeSplitAndCopyBulkloadedHFiles(archiveFiles, tn, tgtFs);
    }
  }

  private void mergeSplitAndCopyBulkloadedHFiles(List<String> files, TableName tn, FileSystem tgtFs)
    throws IOException {
    MapReduceHFileSplitterJob player = new MapReduceHFileSplitterJob();
    conf.set(MapReduceHFileSplitterJob.BULK_OUTPUT_CONF_KEY,
      getBulkOutputDirForTable(tn).toString());
    player.setConf(conf);

    String inputDirs = StringUtils.join(files, ",");
    String[] args = { inputDirs, tn.getNameWithNamespaceInclAsString() };

    int result;

    try {
      result = player.run(args);
    } catch (Exception e) {
      LOG.error("Failed to run MapReduceHFileSplitterJob", e);
      // Delete the bulk load directory if the HFile splitter job fails for any reason,
      // since the job might be re-tried
      deleteBulkLoadDirectory();
      throw new IOException(e);
    }

    if (result != 0) {
      throw new IOException(
        "Failed to run MapReduceHFileSplitterJob with invalid result: " + result);
    }

    incrementalCopyBulkloadHFiles(tgtFs, tn);
  }

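  /**
   * Moves paths that no longer exist on the file system from {@code activeFiles} to
   * {@code archiveFiles}. A file disappears from the active list when it is archived while the copy
   * is in progress.
   */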
  private void updateFileLists(List<String> activeFiles, List<String> archiveFiles)
    throws IOException {
    List<String> newlyArchived = new ArrayList<>();

    for (String spath : activeFiles) {
      if (!fs.exists(new Path(spath))) {
        newlyArchived.add(spath);
      }
    }

    if (newlyArchived.size() > 0) {
      activeFiles.removeAll(newlyArchived);
      archiveFiles.addAll(newlyArchived);
    }

    LOG.debug(newlyArchived.size() + " files have been archived.");
  }

  /**
   * @throws IOException                   If the execution of the backup fails
   * @throws ColumnFamilyMismatchException If the column families of the current table do not match
   *                                       the column families of the last full backup, in which
   *                                       case a full backup should be taken instead
   */
  @Override
  public void execute() throws IOException, ColumnFamilyMismatchException {
    try {
      Map<TableName, String> tablesToFullBackupIds = getFullBackupIds();
      verifyCfCompatibility(backupInfo.getTables(), tablesToFullBackupIds);

      // case PREPARE_INCREMENTAL:
      beginBackup(backupManager, backupInfo);
      backupInfo.setPhase(BackupPhase.PREPARE_INCREMENTAL);
      LOG.debug("For incremental backup, current table set is "
        + backupManager.getIncrementalBackupTableSet());
      newTimestamps = ((IncrementalBackupManager) backupManager).getIncrBackupLogFileMap();
    } catch (Exception e) {
      // fail the overall backup and return
      failBackup(conn, backupInfo, backupManager, e, "Unexpected Exception : ",
        BackupType.INCREMENTAL, conf);
      throw new IOException(e);
    }

    // case INCREMENTAL_COPY:
    try {
      // copy out the table and region info files for each table
      BackupUtils.copyTableRegionInfo(conn, backupInfo, conf);
      setupRegionLocator();
      // convert WAL to HFiles and copy them to .tmp under BACKUP_ROOT
      convertWALsToHFiles();
      incrementalCopyHFiles(new String[] { getBulkOutputDir().toString() },
        backupInfo.getBackupRootDir());
    } catch (Exception e) {
      String msg = "Unexpected exception in incremental-backup: incremental copy " + backupId;
      // fail the overall backup and return
      failBackup(conn, backupInfo, backupManager, e, msg, BackupType.INCREMENTAL, conf);
      throw new IOException(e);
    }
    // case INCR_BACKUP_COMPLETE:
    // Set the overall backup status to "complete". Here we make sure the backup is completed:
    // after this checkpoint, even if a cancel request comes in, the backup will still be finished.
    try {
      // Set the previousTimestampMap, captured before the current log roll, on the manifest.
      Map<TableName, Map<String, Long>> previousTimestampMap = backupManager.readLogTimestampMap();
      backupInfo.setIncrTimestampMap(previousTimestampMap);

      // The table list in backupInfo is good for both full backup and incremental backup.
      // For incremental backup, it contains the incremental backup table set.
      backupManager.writeRegionServerLogTimestamp(backupInfo.getTables(), newTimestamps);

      Map<TableName, Map<String, Long>> newTableSetTimestampMap =
        backupManager.readLogTimestampMap();

      backupInfo.setTableSetTimestampMap(newTableSetTimestampMap);
      Long newStartCode =
        BackupUtils.getMinValue(BackupUtils.getRSLogTimestampMins(newTableSetTimestampMap));
      backupManager.writeBackupStartCode(newStartCode);

      List<BulkLoad> bulkLoads = handleBulkLoad(backupInfo.getTableNames());

      // backup complete
      completeBackup(conn, backupInfo, BackupType.INCREMENTAL, conf);

      List<byte[]> bulkLoadedRows = Lists.transform(bulkLoads, BulkLoad::getRowKey);
      backupManager.deleteBulkLoadedRows(bulkLoadedRows);
    } catch (IOException e) {
      failBackup(conn, backupInfo, backupManager, e, "Unexpected Exception : ",
        BackupType.INCREMENTAL, conf);
      throw new IOException(e);
    }
  }

  protected void incrementalCopyHFiles(String[] files, String backupDest) throws IOException {
    try {
      LOG.debug("Incremental copy HFiles is starting. dest=" + backupDest);
      // set overall backup phase: incremental_copy
      backupInfo.setPhase(BackupPhase.INCREMENTAL_COPY);
      // get incremental backup file list and prepare params for DistCp
      String[] strArr = new String[files.length + 1];
      System.arraycopy(files, 0, strArr, 0, files.length);
      strArr[strArr.length - 1] = backupDest;

      String jobname = "Incremental_Backup-HFileCopy-" + backupInfo.getBackupId();
      if (LOG.isDebugEnabled()) {
        LOG.debug("Setting incremental copy HFiles job name to : " + jobname);
      }
      conf.set(JOB_NAME_CONF_KEY, jobname);

      BackupCopyJob copyService = BackupRestoreFactory.getBackupCopyJob(conf);
      int res = copyService.copy(backupInfo, backupManager, conf, BackupType.INCREMENTAL, strArr);
      if (res != 0) {
        LOG.error("Copy incremental HFile files failed with return code: " + res + ".");
        throw new IOException(
          "Failed copy from " + StringUtils.join(files, ',') + " to " + backupDest);
      }
      LOG.debug("Incremental copy HFiles from " + StringUtils.join(files, ',') + " to " + backupDest
        + " finished.");
    } finally {
      deleteBulkLoadDirectory();
    }
  }

  protected void deleteBulkLoadDirectory() throws IOException {
    // delete original bulk load directory on method exit
    Path path = getBulkOutputDir();
    FileSystem fs = FileSystem.get(path.toUri(), conf);
    boolean result = fs.delete(path, true);
    if (!result) {
      LOG.warn("Could not delete " + path);
    }
  }

  protected void convertWALsToHFiles() throws IOException {
    // get incremental backup file list and prepare parameters for DistCp
    List<String> incrBackupFileList = backupInfo.getIncrBackupFileList();
    // Get list of tables in incremental backup set
    Set<TableName> tableSet = backupManager.getIncrementalBackupTableSet();
    // filter missing files out (they have been copied by previous backups)
    incrBackupFileList = filterMissingFiles(incrBackupFileList);
    List<String> tableList = new ArrayList<String>();
    for (TableName table : tableSet) {
      // Check if table exists
      if (tableExists(table, conn)) {
        tableList.add(table.getNameAsString());
      } else {
        LOG.warn("Table " + table + " does not exist. Skipping it in the WAL converter");
      }
    }
    walToHFiles(incrBackupFileList, tableList);
  }

  protected boolean tableExists(TableName table, Connection conn) throws IOException {
    try (Admin admin = conn.getAdmin()) {
      return admin.tableExists(table);
    }
  }

  protected void walToHFiles(List<String> dirPaths, List<String> tableList) throws IOException {
    Tool player = new WALPlayer();

    // The player reads all files in an arbitrary directory structure and creates
    // a Map task for each file. We use ';' as the separator
    // because WAL file names contain ','
    String dirs = StringUtils.join(dirPaths, ';');
    String jobname = "Incremental_Backup-" + backupId;

    Path bulkOutputPath = getBulkOutputDir();
    conf.set(WALPlayer.BULK_OUTPUT_CONF_KEY, bulkOutputPath.toString());
    conf.set(WALPlayer.INPUT_FILES_SEPARATOR_KEY, ";");
    conf.setBoolean(WALPlayer.MULTI_TABLES_SUPPORT, true);
    conf.set(JOB_NAME_CONF_KEY, jobname);
    String[] playerArgs = { dirs, StringUtils.join(tableList, ",") };

    try {
      player.setConf(conf);
      int result = player.run(playerArgs);
      if (result != 0) {
        throw new IOException("WAL Player failed");
      }
      conf.unset(WALPlayer.INPUT_FILES_SEPARATOR_KEY);
      conf.unset(JOB_NAME_CONF_KEY);
    } catch (IOException e) {
      throw e;
    } catch (Exception ee) {
      throw new IOException("Can not convert from directory " + dirs
        + " (check Hadoop, HBase and WALPlayer M/R job logs) ", ee);
    }
  }

  private void incrementalCopyBulkloadHFiles(FileSystem tgtFs, TableName tn) throws IOException {
    Path bulkOutDir = getBulkOutputDirForTable(tn);

    if (tgtFs.exists(bulkOutDir)) {
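      // Preserve two levels of the source path structure during the bulk-load HFile copy
      // (the setting is reset in the finally block below)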
      conf.setInt(MapReduceBackupCopyJob.NUMBER_OF_LEVELS_TO_PRESERVE_KEY, 2);
      Path tgtPath = getTargetDirForTable(tn);
      try {
        RemoteIterator<LocatedFileStatus> locatedFiles = tgtFs.listFiles(bulkOutDir, true);
        List<String> files = new ArrayList<>();
        while (locatedFiles.hasNext()) {
          LocatedFileStatus file = locatedFiles.next();
          if (file.isFile() && HFile.isHFileFormat(tgtFs, file.getPath())) {
            files.add(file.getPath().toString());
          }
        }
        incrementalCopyHFiles(files.toArray(new String[0]), tgtPath.toString());
      } finally {
        conf.unset(MapReduceBackupCopyJob.NUMBER_OF_LEVELS_TO_PRESERVE_KEY);
      }
    }
  }

  protected Path getBulkOutputDirForTable(TableName table) {
    Path tablePath = getBulkOutputDir();
    tablePath = new Path(tablePath, table.getNamespaceAsString());
    tablePath = new Path(tablePath, table.getQualifierAsString());
    return new Path(tablePath, "data");
  }

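  /**
   * Returns the staging directory for bulk output, laid out as
   * {@code <backupRootDir>/.tmp/<backupId>}.
   */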
  protected Path getBulkOutputDir() {
    String backupId = backupInfo.getBackupId();
    Path path = new Path(backupInfo.getBackupRootDir());
    path = new Path(path, ".tmp");
    path = new Path(path, backupId);
    return path;
  }

  private Path getTargetDirForTable(TableName table) {
    Path path = new Path(backupInfo.getBackupRootDir() + Path.SEPARATOR + backupInfo.getBackupId());
    path = new Path(path, table.getNamespaceAsString());
    path = new Path(path, table.getQualifierAsString());
    return path;
  }

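  /**
   * Registers the snapshot manifest directory of each table's most recent full backup with
   * {@link SnapshotRegionLocator}, so that region locations can be resolved from that full backup's
   * snapshot.
   */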
  private void setupRegionLocator() throws IOException {
    Map<TableName, String> fullBackupIds = getFullBackupIds();
    try (BackupAdminImpl backupAdmin = new BackupAdminImpl(conn)) {
      for (TableName tableName : backupInfo.getTables()) {
        String fullBackupId = fullBackupIds.get(tableName);
        BackupInfo fullBackupInfo = backupAdmin.getBackupInfo(fullBackupId);
        String snapshotName = fullBackupInfo.getSnapshotName(tableName);
        Path root = HBackupFileSystem.getTableBackupPath(tableName,
          new Path(fullBackupInfo.getBackupRootDir()), fullBackupId);
        String manifestDir =
          SnapshotDescriptionUtils.getCompletedSnapshotDir(snapshotName, root).toString();
        SnapshotRegionLocator.setSnapshotManifestDir(conf, manifestDir, tableName);
      }
    }
  }

  private Map<TableName, String> getFullBackupIds() throws IOException {
    // Ancestors are stored from newest to oldest, so we can iterate backwards
    // in order to populate our backupId map with the most recent full backup
    // for a given table
    List<BackupManifest.BackupImage> images = getAncestors(backupInfo);
    Map<TableName, String> results = new HashMap<>();
    for (int i = images.size() - 1; i >= 0; i--) {
      BackupManifest.BackupImage image = images.get(i);
      if (image.getType() != BackupType.FULL) {
        continue;
      }

      for (TableName tn : image.getTableNames()) {
        results.put(tn, image.getBackupId());
      }
    }
    return results;
  }

  /**
   * Verifies that the column families of the current table descriptor match the column families of
   * the last full backup for the tables. This ensures CF compatibility across incremental backups.
   * If a mismatch is detected, a full table backup should be taken rather than an incremental one.
   */
  private void verifyCfCompatibility(Set<TableName> tables,
    Map<TableName, String> tablesToFullBackupId) throws IOException, ColumnFamilyMismatchException {
    ColumnFamilyMismatchException.ColumnFamilyMismatchExceptionBuilder exBuilder =
      ColumnFamilyMismatchException.newBuilder();
    try (Admin admin = conn.getAdmin(); BackupAdminImpl backupAdmin = new BackupAdminImpl(conn)) {
      for (TableName tn : tables) {
        String backupId = tablesToFullBackupId.get(tn);
        BackupInfo fullBackupInfo = backupAdmin.getBackupInfo(backupId);

        ColumnFamilyDescriptor[] currentCfs = admin.getDescriptor(tn).getColumnFamilies();
        String snapshotName = fullBackupInfo.getSnapshotName(tn);
        Path root = HBackupFileSystem.getTableBackupPath(tn,
          new Path(fullBackupInfo.getBackupRootDir()), fullBackupInfo.getBackupId());
        Path manifestDir = SnapshotDescriptionUtils.getCompletedSnapshotDir(snapshotName, root);

        FileSystem fs;
        try {
          fs = FileSystem.get(new URI(fullBackupInfo.getBackupRootDir()), conf);
        } catch (URISyntaxException e) {
          throw new IOException("Unable to get fs for backup " + fullBackupInfo.getBackupId(), e);
        }

        SnapshotProtos.SnapshotDescription snapshotDescription =
          SnapshotDescriptionUtils.readSnapshotInfo(fs, manifestDir);
        SnapshotManifest manifest =
          SnapshotManifest.open(conf, fs, manifestDir, snapshotDescription);
        if (
          SnapshotDescriptionUtils.isExpiredSnapshot(snapshotDescription.getTtl(),
            snapshotDescription.getCreationTime(), EnvironmentEdgeManager.currentTime())
        ) {
          throw new SnapshotTTLExpiredException(
            ProtobufUtil.createSnapshotDesc(snapshotDescription));
        }

        ColumnFamilyDescriptor[] backupCfs = manifest.getTableDescriptor().getColumnFamilies();
        if (!areCfsCompatible(currentCfs, backupCfs)) {
          exBuilder.addMismatchedTable(tn, currentCfs, backupCfs);
        }
      }
    }

    ColumnFamilyMismatchException ex = exBuilder.build();
    if (!ex.getMismatchedTables().isEmpty()) {
      throw ex;
    }
  }

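  /**
   * Compares the two column family sets by name and position only; other column family settings are
   * not part of the compatibility check.
   */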
  private static boolean areCfsCompatible(ColumnFamilyDescriptor[] currentCfs,
    ColumnFamilyDescriptor[] backupCfs) {
    if (currentCfs.length != backupCfs.length) {
      return false;
    }

    for (int i = 0; i < backupCfs.length; i++) {
      String currentCf = currentCfs[i].getNameAsString();
      String backupCf = backupCfs[i].getNameAsString();

      if (!currentCf.equals(backupCf)) {
        return false;
      }
    }

    return true;
  }
}