/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.backup.util;

import java.io.FileNotFoundException;
import java.io.IOException;
import java.net.URLDecoder;
import java.util.ArrayList;
import java.util.Collections;
import java.util.Comparator;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Map.Entry;
import java.util.TreeSet;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.LocatedFileStatus;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.PathFilter;
import org.apache.hadoop.fs.RemoteIterator;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.MetaTableAccessor;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.backup.BackupInfo;
import org.apache.hadoop.hbase.backup.BackupRestoreConstants;
import org.apache.hadoop.hbase.backup.HBackupFileSystem;
import org.apache.hadoop.hbase.backup.RestoreRequest;
import org.apache.hadoop.hbase.backup.impl.BackupManifest;
import org.apache.hadoop.hbase.backup.impl.BackupManifest.BackupImage;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.RegionInfo;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.tool.BulkLoadHFiles;
import org.apache.hadoop.hbase.util.CommonFSUtils;
import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
import org.apache.hadoop.hbase.util.FSTableDescriptors;
import org.apache.hadoop.hbase.util.FSUtils;
import org.apache.hadoop.hbase.wal.AbstractFSWALProvider;
import org.apache.yetus.audience.InterfaceAudience;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

/**
 * A collection of methods used by multiple classes to back up HBase tables.
 */
@InterfaceAudience.Private
public final class BackupUtils {
  protected static final Logger LOG = LoggerFactory.getLogger(BackupUtils.class);
  public static final String LOGNAME_SEPARATOR = ".";
  public static final int MILLISEC_IN_HOUR = 3600000;

  private BackupUtils() {
    throw new AssertionError("Instantiating utility class...");
  }

  /**
   * Loop through the RS log timestamp map for the tables; for each RS, find the minimum timestamp
   * across the tables.
   * @param rsLogTimestampMap map from table to a map of RS to timestamp
   * @return a map from each RS to its minimum timestamp, or null if the input is null or empty
   */
  public static Map<String, Long>
    getRSLogTimestampMins(Map<TableName, Map<String, Long>> rsLogTimestampMap) {
    if (rsLogTimestampMap == null || rsLogTimestampMap.isEmpty()) {
      return null;
    }

    HashMap<String, Long> rsLogTimestampMins = new HashMap<>();
    HashMap<String, HashMap<TableName, Long>> rsLogTimestampMapByRS = new HashMap<>();

    for (Entry<TableName, Map<String, Long>> tableEntry : rsLogTimestampMap.entrySet()) {
      TableName table = tableEntry.getKey();
      Map<String, Long> rsLogTimestamp = tableEntry.getValue();
      for (Entry<String, Long> rsEntry : rsLogTimestamp.entrySet()) {
        String rs = rsEntry.getKey();
        Long ts = rsEntry.getValue();
        // Invert the nesting: group timestamps by region server rather than by table
        rsLogTimestampMapByRS.computeIfAbsent(rs, key -> new HashMap<>()).put(table, ts);
      }
    }

    for (Entry<String, HashMap<TableName, Long>> entry : rsLogTimestampMapByRS.entrySet()) {
      String rs = entry.getKey();
      rsLogTimestampMins.put(rs, BackupUtils.getMinValue(entry.getValue()));
    }

    return rsLogTimestampMins;
  }

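  /*
   * A minimal sketch of getRSLogTimestampMins (server names and timestamps are hypothetical):
   *
   *   Map<TableName, Map<String, Long>> byTable = new HashMap<>();
   *   Map<String, Long> t1 = new HashMap<>();
   *   t1.put("rs1:16020", 100L);
   *   t1.put("rs2:16020", 50L);
   *   byTable.put(TableName.valueOf("t1"), t1);
   *   Map<String, Long> t2 = new HashMap<>();
   *   t2.put("rs1:16020", 80L);
   *   byTable.put(TableName.valueOf("t2"), t2);
   *
   *   Map<String, Long> mins = BackupUtils.getRSLogTimestampMins(byTable);
   *   // mins = {rs1:16020=80, rs2:16020=50}: the smallest timestamp per region server
   */
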
  /**
   * Copy out a table's RegionInfo files into the incremental backup image. TODO: consider moving
   * this logic into HBackupFileSystem.
   * @param conn       connection
   * @param backupInfo backup info
   * @param conf       configuration
   * @throws IOException exception
   */
  public static void copyTableRegionInfo(Connection conn, BackupInfo backupInfo, Configuration conf)
    throws IOException {
    Path rootDir = CommonFSUtils.getRootDir(conf);
    FileSystem fs = rootDir.getFileSystem(conf);

    // For each table in the table set, copy out the table info and region
    // info files in the correct directory structure.
    try (Admin admin = conn.getAdmin()) {
      for (TableName table : backupInfo.getTables()) {
        if (!admin.tableExists(table)) {
          LOG.warn("Table " + table + " does not exist, skipping it.");
          continue;
        }
        TableDescriptor orig = FSTableDescriptors.getTableDescriptorFromFs(fs, rootDir, table);

        // Write a copy of the descriptor to the target directory
        Path target = new Path(backupInfo.getTableBackupDir(table));
        FileSystem targetFs = target.getFileSystem(conf);
        FSTableDescriptors descriptors =
          new FSTableDescriptors(targetFs, CommonFSUtils.getRootDir(conf));
        descriptors.createTableDescriptorForTableDirectory(target, orig, false);
        LOG.debug("Attempting to copy table info for: " + table + " target: " + target
          + " descriptor: " + orig);
        LOG.debug("Finished copying tableinfo.");
        List<RegionInfo> regions = MetaTableAccessor.getTableRegions(conn, table);
        // For each region, write the region info to disk
        LOG.debug("Starting to write region info for table " + table);
        for (RegionInfo regionInfo : regions) {
          Path regionDir = FSUtils
            .getRegionDirFromTableDir(new Path(backupInfo.getTableBackupDir(table)), regionInfo);
          regionDir = new Path(backupInfo.getTableBackupDir(table), regionDir.getName());
          writeRegioninfoOnFilesystem(conf, targetFs, regionDir, regionInfo);
        }
        LOG.debug("Finished writing region info for table " + table);
      }
    }
  }

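  /*
   * A sketch of the on-disk layout copyTableRegionInfo produces under the table backup directory
   * (paths are hypothetical; the exact tableinfo file name is managed by FSTableDescriptors):
   *
   *   <backupRootDir>/<backupId>/<namespace>/<table>/.tabledesc/.tableinfo.<seqid>
   *   <backupRootDir>/<backupId>/<namespace>/<table>/<encoded-region-name>/.regioninfo
   */
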
  /**
   * Write the .regioninfo file on-disk.
   */
  public static void writeRegioninfoOnFilesystem(final Configuration conf, final FileSystem fs,
    final Path regionInfoDir, RegionInfo regionInfo) throws IOException {
    final byte[] content = RegionInfo.toDelimitedByteArray(regionInfo);
    Path regionInfoFile = new Path(regionInfoDir, "." + HConstants.REGIONINFO_QUALIFIER_STR);
    // Look up the permissions configured for new data files
    FsPermission perms = CommonFSUtils.getFilePermissions(fs, conf, HConstants.DATA_FILE_UMASK_KEY);
    // Write the RegionInfo file content
    try (FSDataOutputStream out = FSUtils.create(conf, fs, regionInfoFile, perms, null)) {
      out.write(content);
    }
  }

  /**
   * Parses hostname:port from a WAL file path
   * @param p path to WAL file
   * @return hostname:port, or null if the path cannot be parsed
   */
  public static String parseHostNameFromLogFile(Path p) {
    try {
      if (AbstractFSWALProvider.isArchivedLogFile(p)) {
        return BackupUtils.parseHostFromOldLog(p);
      } else {
        ServerName sname = AbstractFSWALProvider.getServerNameFromWALDirectoryName(p);
        if (sname != null) {
          return sname.getAddress().toString();
        } else {
          LOG.error("Skip log file (can't parse): " + p);
          return null;
        }
      }
    } catch (Exception e) {
      LOG.error("Skip log file (can't parse): " + p, e);
      return null;
    }
  }

  /**
   * Returns the unique part (the file name) of a WAL file path
   * @param walFileName WAL file path as a string
   * @return WAL file name
   */
  public static String getUniqueWALFileNamePart(String walFileName) {
    return getUniqueWALFileNamePart(new Path(walFileName));
  }

  /**
   * Returns the unique part (the file name) of a WAL file path
   * @param p WAL file path
   * @return WAL file name
   */
  public static String getUniqueWALFileNamePart(Path p) {
    return p.getName();
  }

  /**
   * Get the total length of files under the given directory recursively.
   * @param fs  The Hadoop file system
   * @param dir The target directory
   * @return the total length of files
   * @throws IOException exception
   */
  public static long getFilesLength(FileSystem fs, Path dir) throws IOException {
    long totalLength = 0;
    FileStatus[] files = CommonFSUtils.listStatus(fs, dir);
    if (files != null) {
      for (FileStatus fileStatus : files) {
        if (fileStatus.isDirectory()) {
          totalLength += getFilesLength(fs, fileStatus.getPath());
        } else {
          totalLength += fileStatus.getLen();
        }
      }
    }
    return totalLength;
  }

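  /*
   * Hypothetical usage sketch: compute the total size in bytes of one backup image
   * (the path below is made up for illustration):
   *
   *   FileSystem fs = FileSystem.get(conf);
   *   long bytes = BackupUtils.getFilesLength(fs, new Path("/backup/backup_1396650096738"));
   */
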
  /**
   * Get the list of all WAL files (active and archived) older than the given per-host timestamps
   * @param c                configuration
   * @param hostTimestampMap {host,timestamp} map
   * @return list of WAL files
   * @throws IOException exception
   */
  public static List<String> getWALFilesOlderThan(final Configuration c,
    final HashMap<String, Long> hostTimestampMap) throws IOException {
    Path walRootDir = CommonFSUtils.getWALRootDir(c);
    Path logDir = new Path(walRootDir, HConstants.HREGION_LOGDIR_NAME);
    Path oldLogDir = new Path(walRootDir, HConstants.HREGION_OLDLOGDIR_NAME);
    List<String> logFiles = new ArrayList<>();

    PathFilter filter = p -> {
      try {
        if (AbstractFSWALProvider.isMetaFile(p)) {
          return false;
        }
        String host = parseHostNameFromLogFile(p);
        if (host == null) {
          return false;
        }
        Long oldTimestamp = hostTimestampMap.get(host);
        if (oldTimestamp == null) {
          // No timestamp recorded for this host; skip the file
          return false;
        }
        Long currentLogTS = BackupUtils.getCreationTime(p);
        return currentLogTS <= oldTimestamp;
      } catch (Exception e) {
        LOG.warn("Cannot parse " + p, e);
        return false;
      }
    };
    FileSystem walFs = CommonFSUtils.getWALFileSystem(c);
    logFiles = BackupUtils.getFiles(walFs, logDir, logFiles, filter);
    logFiles = BackupUtils.getFiles(walFs, oldLogDir, logFiles, filter);
    return logFiles;
  }

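  /*
   * A minimal sketch of the filter semantics above (host names and timestamps hypothetical):
   * given hostTimestampMap = {"rs1:16020" -> 1396650096738}, a WAL such as
   * .../WALs/rs1,16020,1396650096738/rs1%2C16020%2C1396650096738.1396650000000 is accepted
   * because its creation timestamp (the trailing number) is <= the recorded timestamp.
   */
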
  public static TableName[] parseTableNames(String tables) {
    if (tables == null) {
      return null;
    }
    String[] tableArray = tables.split(BackupRestoreConstants.TABLENAME_DELIMITER_IN_COMMAND);

    TableName[] ret = new TableName[tableArray.length];
    for (int i = 0; i < tableArray.length; i++) {
      ret[i] = TableName.valueOf(tableArray[i]);
    }
    return ret;
  }

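  /*
   * Hypothetical usage sketch, assuming the command-line delimiter in
   * BackupRestoreConstants.TABLENAME_DELIMITER_IN_COMMAND is a comma:
   *
   *   TableName[] tables = BackupUtils.parseTableNames("ns1:t1,t2");
   *   // tables = [ns1:t1, t2]
   */
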
  /**
   * Check whether the backup path exists
   * @param backupStr backup path string
   * @param conf      configuration
   * @return true if the path exists
   * @throws IOException exception
   */
  public static boolean checkPathExist(String backupStr, Configuration conf) throws IOException {
    Path backupPath = new Path(backupStr);
    FileSystem fileSys = backupPath.getFileSystem(conf);
    if (LOG.isTraceEnabled()) {
      LOG.trace("Scheme of given url: " + backupStr + " is: " + fileSys.getUri().getScheme());
    }
    return fileSys.exists(backupPath);
  }

  /**
   * Check the target backup directory and report whether it already exists or will be created
   * @param backupRootPath backup destination path
   * @param conf           configuration
   * @throws IOException exception
   */
  public static void checkTargetDir(String backupRootPath, Configuration conf) throws IOException {
    boolean targetExists;
    try {
      targetExists = checkPathExist(backupRootPath, conf);
    } catch (IOException e) {
      String expMsg = e.getMessage();
      if (expMsg.contains("No FileSystem for scheme")) {
        String newMsg =
          "Unsupported filesystem scheme found in the backup target url. Error Message: " + expMsg;
        LOG.error(newMsg);
        throw new IOException(newMsg);
      } else {
        throw e;
      }
    }

    if (targetExists) {
      LOG.info("Using existing backup root dir: " + backupRootPath);
    } else {
      LOG.info("Backup root dir " + backupRootPath + " does not exist. Will be created.");
    }
  }

  /**
   * Get the minimum value among all values of a map.
   * @param map map
   * @return the minimum value, or null if the map is null or empty
   */
  public static <T> Long getMinValue(Map<T, Long> map) {
    if (map == null || map.isEmpty()) {
      return null;
    }
    // The min among all the RS log timestamps will be kept in the backup system table.
    return Collections.min(map.values());
  }

  /**
   * Parses host:port from an archived WAL path
   * @param p path
   * @return host:port, or null if the path cannot be parsed
   */
  public static String parseHostFromOldLog(Path p) {
    try {
      String n = p.getName();
      int idx = n.lastIndexOf(LOGNAME_SEPARATOR);
      String s = URLDecoder.decode(n.substring(0, idx), "UTF-8");
      return ServerName.valueOf(s).getAddress().toString();
    } catch (Exception e) {
      LOG.warn("Skip log file (can't parse): {}", p);
      return null;
    }
  }

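  /*
   * A hypothetical example of the parse above: for an archived WAL named
   * "rs1%2C16020%2C1396650096738.1396650000000", the prefix before the last '.' URL-decodes to
   * the ServerName "rs1,16020,1396650096738", whose address is "rs1:16020".
   */
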
  /**
   * Given the log file, parse the timestamp from the file name. The timestamp is the last number.
   * @param p a path to the log file
   * @return the timestamp
   * @throws IOException exception
   */
  public static Long getCreationTime(Path p) throws IOException {
    int idx = p.getName().lastIndexOf(LOGNAME_SEPARATOR);
    if (idx < 0) {
      throw new IOException("Cannot parse timestamp from path " + p);
    }
    String ts = p.getName().substring(idx + 1);
    return Long.parseLong(ts);
  }

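  /*
   * Continuing the hypothetical example above: getCreationTime of the same path returns the
   * number after the last '.', i.e. 1396650000000L.
   */
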
  public static List<String> getFiles(FileSystem fs, Path rootDir, List<String> files,
    PathFilter filter) throws IOException {
    RemoteIterator<LocatedFileStatus> it = fs.listFiles(rootDir, true);

    while (it.hasNext()) {
      LocatedFileStatus lfs = it.next();
      if (lfs.isDirectory()) {
        continue;
      }
      // apply filter
      if (filter.accept(lfs.getPath())) {
        files.add(lfs.getPath().toString());
      }
    }
    return files;
  }

  public static void cleanupBackupData(BackupInfo context, Configuration conf) throws IOException {
    cleanupHLogDir(context, conf);
    cleanupTargetDir(context, conf);
  }

  /**
   * Clean up directories generated when DistCp copies HLogs
   * @param backupInfo backup info
   * @param conf       configuration
   * @throws IOException exception
   */
  private static void cleanupHLogDir(BackupInfo backupInfo, Configuration conf) throws IOException {
    String logDir = backupInfo.getHLogTargetDir();
    if (logDir == null) {
      LOG.warn("No log directory specified for " + backupInfo.getBackupId());
      return;
    }

    Path rootPath = new Path(logDir).getParent();
    FileSystem fs = FileSystem.get(rootPath.toUri(), conf);
    FileStatus[] files = listStatus(fs, rootPath, null);
    if (files == null) {
      return;
    }
    for (FileStatus file : files) {
      LOG.debug("Deleting log file: " + file.getPath().getName());
      fs.delete(file.getPath(), true);
    }
  }

  private static void cleanupTargetDir(BackupInfo backupInfo, Configuration conf) {
    try {
      // Clean up the data at the target directory
      LOG.debug("Trying to clean up target dir for backup: " + backupInfo.getBackupId());
      String targetDir = backupInfo.getBackupRootDir();
      if (targetDir == null) {
        LOG.warn("No target directory specified for " + backupInfo.getBackupId());
        return;
      }

      FileSystem outputFs = FileSystem.get(new Path(backupInfo.getBackupRootDir()).toUri(), conf);

      for (TableName table : backupInfo.getTables()) {
        Path targetDirPath = new Path(
          getTableBackupDir(backupInfo.getBackupRootDir(), backupInfo.getBackupId(), table));
        if (outputFs.delete(targetDirPath, true)) {
          LOG.info("Cleaning up backup data at " + targetDirPath.toString() + " done.");
        } else {
          LOG.info("No data has been found in " + targetDirPath.toString() + ".");
        }

        Path tableDir = targetDirPath.getParent();
        FileStatus[] backups = listStatus(outputFs, tableDir, null);
        if (backups == null || backups.length == 0) {
          outputFs.delete(tableDir, true);
          LOG.debug(tableDir.toString() + " is empty, removing it.");
        }
      }
      outputFs.delete(new Path(targetDir, backupInfo.getBackupId()), true);
    } catch (IOException e1) {
      LOG.error("Cleaning up backup data of " + backupInfo.getBackupId() + " at "
        + backupInfo.getBackupRootDir() + " failed due to " + e1.getMessage() + ".");
    }
  }

  /**
   * Given the backup root dir, backup id and the table name, return the backup image location,
   * which is also where the backup manifest file is. The return value looks like:
   * "hdfs://backup.hbase.org:9000/user/biadmin/backup1/backup_1396650096738/default/t1_dn/"
   * @param backupRootDir backup root directory
   * @param backupId      backup id
   * @param tableName     table name
   * @return backupPath String for the particular table
   */
  public static String getTableBackupDir(String backupRootDir, String backupId,
    TableName tableName) {
    return backupRootDir + Path.SEPARATOR + backupId + Path.SEPARATOR
      + tableName.getNamespaceAsString() + Path.SEPARATOR + tableName.getQualifierAsString()
      + Path.SEPARATOR;
  }

  /**
   * Sort history list by start time in descending order.
   * @param historyList history list
   * @return sorted list of BackupInfo
   */
  public static ArrayList<BackupInfo> sortHistoryListDesc(ArrayList<BackupInfo> historyList) {
    // Sort on the numeric start timestamp directly; a string-keyed map would compare
    // lexicographically and collapse entries with identical start times.
    ArrayList<BackupInfo> list = new ArrayList<>(historyList);
    list.sort(Comparator.comparingLong(BackupInfo::getStartTs).reversed());
    return list;
  }

  /**
   * Calls fs.listStatus() and treats FileNotFoundException as non-fatal. This accommodates
   * differences between Hadoop versions: Hadoop 1 does not throw a FileNotFoundException and
   * returns an empty FileStatus[], while Hadoop 2 throws FileNotFoundException.
   * @param fs     file system
   * @param dir    directory
   * @param filter path filter
   * @return null if dir is empty or doesn't exist, otherwise FileStatus array
   */
  public static FileStatus[] listStatus(final FileSystem fs, final Path dir,
    final PathFilter filter) throws IOException {
    FileStatus[] status = null;
    try {
      status = filter == null ? fs.listStatus(dir) : fs.listStatus(dir, filter);
    } catch (FileNotFoundException fnfe) {
      // If the directory doesn't exist, return null
      if (LOG.isTraceEnabled()) {
        LOG.trace(dir + " doesn't exist");
      }
    }

    if (status == null || status.length < 1) {
      return null;
    }

    return status;
  }

  /**
   * Return the 'path' component of a Path. In Hadoop, Path is a URI. This method returns the 'path'
   * component of a Path's URI: e.g. If a Path is
   * <code>hdfs://example.org:9000/hbase_trunk/TestTable/compaction.dir</code>, this method returns
   * <code>/hbase_trunk/TestTable/compaction.dir</code>. This method is useful if you want to print
   * out a Path without qualifying the FileSystem instance.
   * @param p file system Path whose 'path' component we are to return.
   * @return the 'path' component of the Path's URI
   */
  public static String getPath(Path p) {
    return p.toUri().getPath();
  }

  /**
   * Given the backup root dir and the backup id, return the log file location for an incremental
   * backup.
   * @param backupRootDir backup root directory
   * @param backupId      backup id
   * @return logBackupDir: ".../user/biadmin/backup1/WALs/backup_1396650096738"
   */
  public static String getLogBackupDir(String backupRootDir, String backupId) {
    return backupRootDir + Path.SEPARATOR + backupId + Path.SEPARATOR
      + HConstants.HREGION_LOGDIR_NAME;
  }

  private static List<BackupInfo> getHistory(Configuration conf, Path backupRootPath)
    throws IOException {
    // Get all backup history from the backup root destination
    FileSystem fs = FileSystem.get(backupRootPath.toUri(), conf);
    RemoteIterator<LocatedFileStatus> it = fs.listLocatedStatus(backupRootPath);

    List<BackupInfo> infos = new ArrayList<>();
    while (it.hasNext()) {
      LocatedFileStatus lfs = it.next();

      if (!lfs.isDirectory()) {
        continue;
      }

      String backupId = lfs.getPath().getName();
      try {
        BackupInfo info = loadBackupInfo(backupRootPath, backupId, fs);
        if (info != null) {
          infos.add(info);
        } else {
          // loadBackupInfo returns null when no manifest is found under the directory
          LOG.warn("No backup manifest found under: " + lfs.getPath());
        }
      } catch (IOException e) {
        LOG.error("Cannot load backup info from: " + lfs.getPath(), e);
      }
    }
    // Sort by the timestamp embedded in the backup id, newest first
    Collections.sort(infos, new Comparator<BackupInfo>() {
      @Override
      public int compare(BackupInfo o1, BackupInfo o2) {
        long ts1 = getTimestamp(o1.getBackupId());
        long ts2 = getTimestamp(o2.getBackupId());
        return Long.compare(ts2, ts1);
      }

      private long getTimestamp(String backupId) {
        String[] split = backupId.split("_");
        return Long.parseLong(split[1]);
      }
    });
    return infos;
  }

  public static List<BackupInfo> getHistory(Configuration conf, int n, Path backupRootPath,
    BackupInfo.Filter... filters) throws IOException {
    List<BackupInfo> infos = getHistory(conf, backupRootPath);
    List<BackupInfo> ret = new ArrayList<>();
    for (BackupInfo info : infos) {
      if (ret.size() == n) {
        break;
      }
      boolean passed = true;
      for (int i = 0; i < filters.length; i++) {
        if (!filters[i].apply(info)) {
          passed = false;
          break;
        }
      }
      if (passed) {
        ret.add(info);
      }
    }
    return ret;
  }

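  /*
   * Hypothetical usage sketch: fetch the ten most recent completed backups under a root path.
   * BackupInfo.Filter is a single-method interface, so a lambda works:
   *
   *   List<BackupInfo> recent = BackupUtils.getHistory(conf, 10, backupRootPath,
   *     info -> info.getState() == BackupInfo.BackupState.COMPLETE);
   */
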
  public static BackupInfo loadBackupInfo(Path backupRootPath, String backupId, FileSystem fs)
    throws IOException {
    Path backupPath = new Path(backupRootPath, backupId);

    RemoteIterator<LocatedFileStatus> it = fs.listFiles(backupPath, true);
    while (it.hasNext()) {
      LocatedFileStatus lfs = it.next();
      if (lfs.getPath().getName().equals(BackupManifest.MANIFEST_FILE_NAME)) {
        // Load BackupManifest
        BackupManifest manifest = new BackupManifest(fs, lfs.getPath().getParent());
        BackupInfo info = manifest.toBackupInfo();
        return info;
      }
    }
    return null;
  }

  /**
   * Create a restore request.
   * @param backupRootDir backup root dir
   * @param backupId      backup id
   * @param check         check only
   * @param fromTables    list of tables to restore from
   * @param toTables      list of tables to restore to
   * @param isOverwrite   overwrite data
   * @return request object
   */
  public static RestoreRequest createRestoreRequest(String backupRootDir, String backupId,
    boolean check, TableName[] fromTables, TableName[] toTables, boolean isOverwrite) {
    RestoreRequest.Builder builder = new RestoreRequest.Builder();
    RestoreRequest request =
      builder.withBackupRootDir(backupRootDir).withBackupId(backupId).withCheck(check)
        .withFromTables(fromTables).withToTables(toTables).withOvewrite(isOverwrite).build();
    return request;
  }

  public static boolean validate(HashMap<TableName, BackupManifest> backupManifestMap,
    Configuration conf) throws IOException {
    boolean isValid = true;

    for (Entry<TableName, BackupManifest> manifestEntry : backupManifestMap.entrySet()) {
      TableName table = manifestEntry.getKey();
      TreeSet<BackupImage> imageSet = new TreeSet<>();

      ArrayList<BackupImage> depList = manifestEntry.getValue().getDependentListByTable(table);
      if (depList != null && !depList.isEmpty()) {
        imageSet.addAll(depList);
      }

      LOG.info("Dependent image(s) from old to new:");
      for (BackupImage image : imageSet) {
        String imageDir =
          HBackupFileSystem.getTableBackupDir(image.getRootDir(), image.getBackupId(), table);
        if (!BackupUtils.checkPathExist(imageDir, conf)) {
          LOG.error("ERROR: backup image does not exist: " + imageDir);
          isValid = false;
          break;
        }
        LOG.info("Backup image: " + image.getBackupId() + " for '" + table + "' is available");
      }
    }
    return isValid;
  }

  public static Path getBulkOutputDir(String tableName, Configuration conf, boolean deleteOnExit)
    throws IOException {
    FileSystem fs = FileSystem.get(conf);
    String tmp =
      conf.get(HConstants.TEMPORARY_FS_DIRECTORY_KEY, fs.getHomeDirectory() + "/hbase-staging");
    Path path = new Path(tmp + Path.SEPARATOR + "bulk_output-" + tableName + "-"
      + EnvironmentEdgeManager.currentTime());
    if (deleteOnExit) {
      fs.deleteOnExit(path);
    }
    return path;
  }

  public static Path getBulkOutputDir(String tableName, Configuration conf) throws IOException {
    return getBulkOutputDir(tableName, conf, true);
  }

  public static String getFileNameCompatibleString(TableName table) {
    return table.getNamespaceAsString() + "-" + table.getQualifierAsString();
  }

  public static boolean failed(int result) {
    return result != 0;
  }

  public static boolean succeeded(int result) {
    return result == 0;
  }

  public static BulkLoadHFiles createLoader(Configuration config) {
    // Set configuration for restore: bulk loading HFiles can be slow, so raise
    // hbase.rpc.timeout to one hour
    Configuration conf = new Configuration(config);
    conf.setInt(HConstants.HBASE_RPC_TIMEOUT_KEY, MILLISEC_IN_HOUR);

    // By default, the per-region-per-family file limit is 32, and the loader fails if the
    // number of files in any region exceeds it. Bad for snapshot restore, so lift the limit.
    conf.setInt(BulkLoadHFiles.MAX_FILES_PER_REGION_PER_FAMILY, Integer.MAX_VALUE);
    conf.set(BulkLoadHFiles.IGNORE_UNMATCHED_CF_CONF_KEY, "yes");
    return BulkLoadHFiles.create(conf);
  }

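  /*
   * Hypothetical usage sketch: bulk-load restored HFiles into a table via the
   * BulkLoadHFiles.bulkLoad(TableName, Path) entry point (paths below are made up):
   *
   *   BulkLoadHFiles loader = BackupUtils.createLoader(conf);
   *   loader.bulkLoad(TableName.valueOf("t1"), new Path("/tmp/bulk_output-t1-1396650096738"));
   */
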
  public static String findMostRecentBackupId(String[] backupIds) {
    long recentTimestamp = Long.MIN_VALUE;
    for (String backupId : backupIds) {
      long ts = Long.parseLong(backupId.split("_")[1]);
      if (ts > recentTimestamp) {
        recentTimestamp = ts;
      }
    }
    return BackupRestoreConstants.BACKUPID_PREFIX + recentTimestamp;
  }

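  /*
   * A small example of findMostRecentBackupId (ids hypothetical), assuming
   * BackupRestoreConstants.BACKUPID_PREFIX is "backup_":
   *
   *   findMostRecentBackupId(new String[] { "backup_1396650096738", "backup_1396650099999" })
   *   // returns "backup_1396650099999"
   */
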
}