/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.backup.util;

import java.io.FileNotFoundException;
import java.io.IOException;
import java.net.URLDecoder;
import java.util.ArrayList;
import java.util.Collections;
import java.util.Comparator;
import java.util.HashMap;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.Map.Entry;
import java.util.TreeMap;
import java.util.TreeSet;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.LocatedFileStatus;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.PathFilter;
import org.apache.hadoop.fs.RemoteIterator;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.MetaTableAccessor;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.backup.BackupInfo;
import org.apache.hadoop.hbase.backup.BackupRestoreConstants;
import org.apache.hadoop.hbase.backup.HBackupFileSystem;
import org.apache.hadoop.hbase.backup.RestoreRequest;
import org.apache.hadoop.hbase.backup.impl.BackupManifest;
import org.apache.hadoop.hbase.backup.impl.BackupManifest.BackupImage;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.RegionInfo;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.master.region.MasterRegionFactory;
import org.apache.hadoop.hbase.tool.BulkLoadHFiles;
import org.apache.hadoop.hbase.util.CommonFSUtils;
import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
import org.apache.hadoop.hbase.util.FSTableDescriptors;
import org.apache.hadoop.hbase.util.FSUtils;
import org.apache.hadoop.hbase.wal.AbstractFSWALProvider;
import org.apache.yetus.audience.InterfaceAudience;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import org.apache.hbase.thirdparty.com.google.common.base.Splitter;
import org.apache.hbase.thirdparty.com.google.common.collect.Iterables;
import org.apache.hbase.thirdparty.com.google.common.collect.Iterators;

/**
 * A collection of methods used by multiple classes to back up HBase tables.
 */
@InterfaceAudience.Private
public final class BackupUtils {
  private static final Logger LOG = LoggerFactory.getLogger(BackupUtils.class);
  public static final String LOGNAME_SEPARATOR = ".";
  public static final int MILLISEC_IN_HOUR = 3600000;

  private BackupUtils() {
    throw new AssertionError("Instantiating utility class...");
  }

  /**
   * Loop through the RS log timestamp map for the tables; for each RS, find the min timestamp
   * value for that RS among the tables.
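   * <p>
   * For example (hypothetical values): given {@code {t1: {rs1: 100, rs2: 200}, t2: {rs1: 150}}},
   * the result is {@code {rs1: 100, rs2: 200}}.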
   * @param rsLogTimestampMap timestamp map
   * @return the min timestamp of each RS
   */
  public static Map<String, Long>
    getRSLogTimestampMins(Map<TableName, Map<String, Long>> rsLogTimestampMap) {
    if (rsLogTimestampMap == null || rsLogTimestampMap.isEmpty()) {
      return null;
    }

    HashMap<String, Long> rsLogTimestampMins = new HashMap<>();
    HashMap<String, HashMap<TableName, Long>> rsLogTimestampMapByRS = new HashMap<>();

    for (Entry<TableName, Map<String, Long>> tableEntry : rsLogTimestampMap.entrySet()) {
      TableName table = tableEntry.getKey();
      Map<String, Long> rsLogTimestamp = tableEntry.getValue();
      for (Entry<String, Long> rsEntry : rsLogTimestamp.entrySet()) {
        String rs = rsEntry.getKey();
        Long ts = rsEntry.getValue();
        rsLogTimestampMapByRS.putIfAbsent(rs, new HashMap<>());
        rsLogTimestampMapByRS.get(rs).put(table, ts);
      }
    }

    for (Entry<String, HashMap<TableName, Long>> entry : rsLogTimestampMapByRS.entrySet()) {
      String rs = entry.getKey();
      rsLogTimestampMins.put(rs, BackupUtils.getMinValue(entry.getValue()));
    }

    return rsLogTimestampMins;
  }

  /**
   * Copy out Table RegionInfo into the incremental backup image. Consider moving this logic into
   * HBackupFileSystem.
   * @param conn       connection
   * @param backupInfo backup info
   * @param conf       configuration
   * @throws IOException exception
   */
  public static void copyTableRegionInfo(Connection conn, BackupInfo backupInfo, Configuration conf)
    throws IOException {
    Path rootDir = CommonFSUtils.getRootDir(conf);
    FileSystem fs = rootDir.getFileSystem(conf);

    // for each table in the table set, copy out the table info and region
    // info files in the correct directory structure
    try (Admin admin = conn.getAdmin()) {
      for (TableName table : backupInfo.getTables()) {
        if (!admin.tableExists(table)) {
          LOG.warn("Table " + table + " does not exist, skipping it.");
          continue;
        }
        TableDescriptor orig = FSTableDescriptors.getTableDescriptorFromFs(fs, rootDir, table);

        // write a copy of descriptor to the target directory
        Path target = new Path(backupInfo.getTableBackupDir(table));
        FileSystem targetFs = target.getFileSystem(conf);
        try (FSTableDescriptors descriptors =
          new FSTableDescriptors(targetFs, CommonFSUtils.getRootDir(conf))) {
          descriptors.createTableDescriptorForTableDirectory(target, orig, false);
        }
        LOG.debug("Attempting to copy table info for: " + table + " target: " + target
          + " descriptor: " + orig);
150        LOG.debug("Finished copying tableinfo.");
151        List<RegionInfo> regions = MetaTableAccessor.getTableRegions(conn, table);
152        // For each region, write the region info to disk
153        LOG.debug("Starting to write region info for table " + table);
154        for (RegionInfo regionInfo : regions) {
155          Path regionDir = FSUtils
156            .getRegionDirFromTableDir(new Path(backupInfo.getTableBackupDir(table)), regionInfo);
157          regionDir = new Path(backupInfo.getTableBackupDir(table), regionDir.getName());
158          writeRegioninfoOnFilesystem(conf, targetFs, regionDir, regionInfo);
159        }
160        LOG.debug("Finished writing region info for table " + table);
161      }
162    }
163  }
164
  /**
   * Write the .regioninfo file on-disk.
   */
  public static void writeRegioninfoOnFilesystem(final Configuration conf, final FileSystem fs,
    final Path regionInfoDir, RegionInfo regionInfo) throws IOException {
    final byte[] content = RegionInfo.toDelimitedByteArray(regionInfo);
    Path regionInfoFile = new Path(regionInfoDir, "." + HConstants.REGIONINFO_QUALIFIER_STR);
    // First check to get the permissions
    FsPermission perms = CommonFSUtils.getFilePermissions(fs, conf, HConstants.DATA_FILE_UMASK_KEY);
    // Write the RegionInfo file content
    FSDataOutputStream out = FSUtils.create(conf, fs, regionInfoFile, perms, null);
    try {
      out.write(content);
    } finally {
      out.close();
    }
  }

  /**
   * Parses hostname:port from WAL file path
   * @param p path to WAL file
   * @return hostname:port
   */
  public static String parseHostNameFromLogFile(Path p) {
    try {
      if (AbstractFSWALProvider.isArchivedLogFile(p)) {
        return BackupUtils.parseHostFromOldLog(p);
      } else {
        ServerName sname = AbstractFSWALProvider.getServerNameFromWALDirectoryName(p);
        if (sname != null) {
          return sname.getAddress().toString();
        } else {
          LOG.error("Skip log file (can't parse): " + p);
          return null;
        }
      }
    } catch (Exception e) {
      LOG.error("Skip log file (can't parse): " + p, e);
      return null;
    }
  }

  /**
   * Returns the unique part (the file name) of a WAL file path
   * @param walFileName WAL file name
   * @return WAL file name
   */
  public static String getUniqueWALFileNamePart(String walFileName) {
    return getUniqueWALFileNamePart(new Path(walFileName));
  }

  /**
   * Returns the unique part (the file name) of a WAL file path
   * @param p WAL file path
   * @return WAL file name
   */
  public static String getUniqueWALFileNamePart(Path p) {
    return p.getName();
  }

  /**
   * Get the total length of files under the given directory recursively.
   * @param fs  The hadoop file system
   * @param dir The target directory
   * @return the total length of files
   * @throws IOException exception
   */
  public static long getFilesLength(FileSystem fs, Path dir) throws IOException {
    long totalLength = 0;
    FileStatus[] files = CommonFSUtils.listStatus(fs, dir);
    if (files != null) {
      for (FileStatus fileStatus : files) {
        if (fileStatus.isDirectory()) {
          totalLength += getFilesLength(fs, fileStatus.getPath());
        } else {
          totalLength += fileStatus.getLen();
        }
      }
    }
    return totalLength;
  }

  /**
   * Get the list of WAL files (from both the WALs and oldWALs directories) that are no newer than
   * the given per-host timestamps.
   * @param c                configuration
   * @param hostTimestampMap {host,timestamp} map
   * @return list of WAL files
   * @throws IOException exception
   */
254  public static List<String> getWALFilesOlderThan(final Configuration c,
255    final HashMap<String, Long> hostTimestampMap) throws IOException {
256    Path walRootDir = CommonFSUtils.getWALRootDir(c);
257    Path logDir = new Path(walRootDir, HConstants.HREGION_LOGDIR_NAME);
258    Path oldLogDir = new Path(walRootDir, HConstants.HREGION_OLDLOGDIR_NAME);
259    List<String> logFiles = new ArrayList<>();
260
261    PathFilter filter = p -> {
262      try {
263        if (AbstractFSWALProvider.isMetaFile(p)) {
264          return false;
265        }
266        String host = parseHostNameFromLogFile(p);
267        if (host == null) {
268          return false;
269        }
270        Long oldTimestamp = hostTimestampMap.get(host);
271        Long currentLogTS = BackupUtils.getCreationTime(p);
272        return currentLogTS <= oldTimestamp;
273      } catch (Exception e) {
        LOG.warn("Cannot parse " + p, e);
        return false;
      }
    };
    FileSystem walFs = CommonFSUtils.getWALFileSystem(c);
    logFiles = BackupUtils.getFiles(walFs, logDir, logFiles, filter);
    logFiles = BackupUtils.getFiles(walFs, oldLogDir, logFiles, filter);
    return logFiles;
  }

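  /**
   * Splits a delimited table list, as passed on the backup/restore command line, into an array of
   * table names. For example, assuming the delimiter in
   * {@link BackupRestoreConstants#TABLENAME_DELIMITER_IN_COMMAND} is a comma,
   * {@code parseTableNames("ns1:t1,t2")} yields {@code [ns1:t1, t2]}.
   */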
  public static TableName[] parseTableNames(String tables) {
    if (tables == null) {
      return null;
    }
    return Splitter.on(BackupRestoreConstants.TABLENAME_DELIMITER_IN_COMMAND).splitToStream(tables)
      .map(TableName::valueOf).toArray(TableName[]::new);
  }

  /**
   * Check whether the backup path exists
   * @param backupStr backup
   * @param conf      configuration
   * @return true if the path exists
   * @throws IOException exception
   */
  public static boolean checkPathExist(String backupStr, Configuration conf) throws IOException {
    boolean isExist = false;
    Path backupPath = new Path(backupStr);
    FileSystem fileSys = backupPath.getFileSystem(conf);
    String targetFsScheme = fileSys.getUri().getScheme();
    if (LOG.isTraceEnabled()) {
      LOG.trace("Scheme of given url: " + backupStr + " is: " + targetFsScheme);
    }
    if (fileSys.exists(backupPath)) {
      isExist = true;
    }
    return isExist;
  }

  /**
   * Check the backup target path before the backup starts, and report whether it already exists
   * @param backupRootPath backup destination path
   * @param conf           configuration
   * @throws IOException exception
   */
  public static void checkTargetDir(String backupRootPath, Configuration conf) throws IOException {
    boolean targetExists;
    try {
      targetExists = checkPathExist(backupRootPath, conf);
    } catch (IOException e) {
      String expMsg = e.getMessage();
      String newMsg = null;
      if (expMsg.contains("No FileSystem for scheme")) {
        newMsg =
          "Unsupported filesystem scheme found in the backup target url. Error Message: " + expMsg;
        LOG.error(newMsg);
        throw new IOException(newMsg);
      } else {
        throw e;
      }
    }

    if (targetExists) {
      LOG.info("Using existing backup root dir: " + backupRootPath);
    } else {
      LOG.info("Backup root dir " + backupRootPath + " does not exist. Will be created.");
    }
  }

  /**
   * Get the min value of all the values in a map.
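   * For example, given the (hypothetical) map {@code {t1: 100, t2: 50}}, this returns {@code 50}.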
   * @param map map
   * @return the min value
   */
348  public static <T> Long getMinValue(Map<T, Long> map) {
349    Long minTimestamp = null;
350    if (map != null) {
351      ArrayList<Long> timestampList = new ArrayList<>(map.values());
352      Collections.sort(timestampList);
353      // The min among all the RS log timestamps will be kept in backup system table table.
354      minTimestamp = timestampList.get(0);
355    }
356    return minTimestamp;
357  }
358
  /**
   * Parses host name:port from archived WAL path
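   * <p>
   * For example (hypothetical file name): an archived WAL named
   * {@code host1.example.com%2C16020%2C1396650096738.1396650096738} URL-decodes to
   * {@code host1.example.com,16020,1396650096738.1396650096738} and yields
   * {@code host1.example.com:16020}.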
   * @param p path
   * @return host name and port, as {@code host:port}
   */
  public static String parseHostFromOldLog(Path p) {
    // Skip master wals
    if (p.getName().endsWith(MasterRegionFactory.ARCHIVED_WAL_SUFFIX)) {
      return null;
    }
    try {
      String urlDecodedName = URLDecoder.decode(p.getName(), "UTF8");
      Iterable<String> nameSplitsOnComma = Splitter.on(",").split(urlDecodedName);
      String host = Iterables.get(nameSplitsOnComma, 0);
      String port = Iterables.get(nameSplitsOnComma, 1);
      return host + ":" + port;
    } catch (Exception e) {
      LOG.warn("Skip log file (can't parse): {}", p);
      return null;
    }
  }

  /**
   * Given the log file, parse the timestamp from the file name. The timestamp is the last number.
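   * For example, a (hypothetical) WAL file named
   * {@code host1.example.com%2C16020%2C1396650096738.1396650096738} yields {@code 1396650096738},
   * the part after the last '.'.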
   * @param p a path to the log file
   * @return the timestamp
   * @throws IOException exception
   */
  public static Long getCreationTime(Path p) throws IOException {
    int idx = p.getName().lastIndexOf(LOGNAME_SEPARATOR);
    if (idx < 0) {
      throw new IOException("Cannot parse timestamp from path " + p);
    }
    String ts = p.getName().substring(idx + 1);
    return Long.parseLong(ts);
  }

396  public static List<String> getFiles(FileSystem fs, Path rootDir, List<String> files,
397    PathFilter filter) throws IOException {
398    RemoteIterator<LocatedFileStatus> it = fs.listFiles(rootDir, true);
399
400    while (it.hasNext()) {
401      LocatedFileStatus lfs = it.next();
402      if (lfs.isDirectory()) {
403        continue;
404      }
405      // apply filter
406      if (filter.accept(lfs.getPath())) {
407        files.add(lfs.getPath().toString());
408      }
409    }
410    return files;
411  }
412
413  public static void cleanupBackupData(BackupInfo context, Configuration conf) throws IOException {
414    cleanupHLogDir(context, conf);
415    cleanupTargetDir(context, conf);
416  }
417
  /**
   * Clean up directories which are generated when DistCp copies hlogs (WALs)
   * @param backupInfo backup info
   * @param conf       configuration
   * @throws IOException exception
   */
  private static void cleanupHLogDir(BackupInfo backupInfo, Configuration conf) throws IOException {
    String logDir = backupInfo.getHLogTargetDir();
    if (logDir == null) {
      LOG.warn("No log directory specified for " + backupInfo.getBackupId());
      return;
    }

    Path rootPath = new Path(logDir).getParent();
    FileSystem fs = FileSystem.get(rootPath.toUri(), conf);
    FileStatus[] files = listStatus(fs, rootPath, null);
    if (files == null) {
      return;
    }
    for (FileStatus file : files) {
      LOG.debug("Delete log files: " + file.getPath().getName());
      fs.delete(file.getPath(), true);
    }
  }

  private static void cleanupTargetDir(BackupInfo backupInfo, Configuration conf) {
    try {
      // clean up the data at target directory
      LOG.debug("Trying to clean up target dir: " + backupInfo.getBackupId());
      String targetDir = backupInfo.getBackupRootDir();
      if (targetDir == null) {
        LOG.warn("No target directory specified for " + backupInfo.getBackupId());
        return;
      }

      FileSystem outputFs = FileSystem.get(new Path(backupInfo.getBackupRootDir()).toUri(), conf);

      for (TableName table : backupInfo.getTables()) {
        Path targetDirPath = new Path(
          getTableBackupDir(backupInfo.getBackupRootDir(), backupInfo.getBackupId(), table));
        if (outputFs.delete(targetDirPath, true)) {
          LOG.info("Cleaning up backup data at " + targetDirPath.toString() + " done.");
        } else {
          LOG.info("No data has been found in " + targetDirPath.toString() + ".");
        }

        Path tableDir = targetDirPath.getParent();
        FileStatus[] backups = listStatus(outputFs, tableDir, null);
        if (backups == null || backups.length == 0) {
          outputFs.delete(tableDir, true);
          LOG.debug(tableDir.toString() + " is empty, remove it.");
        }
      }
      outputFs.delete(new Path(targetDir, backupInfo.getBackupId()), true);
    } catch (IOException e1) {
      LOG.error("Cleaning up backup data of " + backupInfo.getBackupId() + " at "
        + backupInfo.getBackupRootDir() + " failed due to " + e1.getMessage() + ".");
    }
  }

  /**
   * Given the backup root dir, backup id and the table name, return the backup image location,
   * which is also where the backup manifest file is. The return value looks like:
   * "hdfs://backup.hbase.org:9000/user/biadmin/backup1/backup_1396650096738/default/t1_dn/"
   * @param backupRootDir backup root directory
   * @param backupId      backup id
   * @param tableName     table name
   * @return backupPath String for the particular table
   */
  public static String getTableBackupDir(String backupRootDir, String backupId,
    TableName tableName) {
    return backupRootDir + Path.SEPARATOR + backupId + Path.SEPARATOR
      + tableName.getNamespaceAsString() + Path.SEPARATOR + tableName.getQualifierAsString()
      + Path.SEPARATOR;
  }

  /**
   * Sort history list by start time in descending order.
   * @param historyList history list
   * @return sorted list of BackupInfo
   */
499  public static ArrayList<BackupInfo> sortHistoryListDesc(ArrayList<BackupInfo> historyList) {
500    ArrayList<BackupInfo> list = new ArrayList<>();
501    TreeMap<String, BackupInfo> map = new TreeMap<>();
502    for (BackupInfo h : historyList) {
503      map.put(Long.toString(h.getStartTs()), h);
504    }
505    Iterator<String> i = map.descendingKeySet().iterator();
506    while (i.hasNext()) {
507      list.add(map.get(i.next()));
508    }
509    return list;
510  }
511
  /**
   * Calls fs.listStatus() and treats FileNotFoundException as non-fatal. This accommodates
   * differences between Hadoop versions, where Hadoop 1 does not throw a FileNotFoundException and
   * returns an empty FileStatus[], while Hadoop 2 will throw FileNotFoundException.
   * @param fs     file system
   * @param dir    directory
   * @param filter path filter
   * @return null if dir is empty or doesn't exist, otherwise FileStatus array
   */
  public static FileStatus[] listStatus(final FileSystem fs, final Path dir,
    final PathFilter filter) throws IOException {
    FileStatus[] status = null;
    try {
      status = filter == null ? fs.listStatus(dir) : fs.listStatus(dir, filter);
    } catch (FileNotFoundException fnfe) {
      // if directory doesn't exist, return null
      if (LOG.isTraceEnabled()) {
        LOG.trace(dir + " doesn't exist");
      }
    }

    if (status == null || status.length < 1) {
      return null;
    }

    return status;
  }

  /**
   * Return the 'path' component of a Path. In Hadoop, Path is a URI. This method returns the 'path'
   * component of a Path's URI: e.g. If a Path is
   * <code>hdfs://example.org:9000/hbase_trunk/TestTable/compaction.dir</code>, this method returns
   * <code>/hbase_trunk/TestTable/compaction.dir</code>. This method is useful if you want to print
   * out a Path without the qualifying Filesystem instance.
   * @param p file system Path whose 'path' component we are to return.
   * @return Path portion of the Filesystem
   */
  public static String getPath(Path p) {
    return p.toUri().getPath();
  }

  /**
   * Given the backup root dir and the backup id, return the log file location for an incremental
   * backup.
   * @param backupRootDir backup root directory
   * @param backupId      backup id
   * @return logBackupDir: ".../user/biadmin/backup1/backup_1396650096738/WALs"
   */
  public static String getLogBackupDir(String backupRootDir, String backupId) {
    return backupRootDir + Path.SEPARATOR + backupId + Path.SEPARATOR
      + HConstants.HREGION_LOGDIR_NAME;
  }

565  private static List<BackupInfo> getHistory(Configuration conf, Path backupRootPath)
566    throws IOException {
567    // Get all (n) history from backup root destination
568
569    FileSystem fs = FileSystem.get(backupRootPath.toUri(), conf);
570    RemoteIterator<LocatedFileStatus> it = fs.listLocatedStatus(backupRootPath);
571
572    List<BackupInfo> infos = new ArrayList<>();
573    while (it.hasNext()) {
574      LocatedFileStatus lfs = it.next();
575
576      if (!lfs.isDirectory()) {
577        continue;
578      }
579
580      String backupId = lfs.getPath().getName();
581      try {
582        BackupInfo info = loadBackupInfo(backupRootPath, backupId, fs);
583        infos.add(info);
584      } catch (IOException e) {
585        LOG.error("Can not load backup info from: " + lfs.getPath(), e);
586      }
587    }
588    // Sort
589    Collections.sort(infos, new Comparator<BackupInfo>() {
590      @Override
591      public int compare(BackupInfo o1, BackupInfo o2) {
592        long ts1 = getTimestamp(o1.getBackupId());
593        long ts2 = getTimestamp(o2.getBackupId());
594
595        if (ts1 == ts2) {
596          return 0;
597        }
598
599        return ts1 < ts2 ? 1 : -1;
600      }
601
602      private long getTimestamp(String backupId) {
603        return Long.parseLong(Iterators.get(Splitter.on('_').split(backupId).iterator(), 1));
604      }
605    });
606    return infos;
607  }
608
609  public static List<BackupInfo> getHistory(Configuration conf, int n, Path backupRootPath,
610    BackupInfo.Filter... filters) throws IOException {
611    List<BackupInfo> infos = getHistory(conf, backupRootPath);
612    List<BackupInfo> ret = new ArrayList<>();
613    for (BackupInfo info : infos) {
614      if (ret.size() == n) {
615        break;
616      }
617      boolean passed = true;
618      for (int i = 0; i < filters.length; i++) {
619        if (!filters[i].apply(info)) {
620          passed = false;
621          break;
622        }
623      }
624      if (passed) {
625        ret.add(info);
626      }
627    }
628    return ret;
629  }
630
  public static BackupInfo loadBackupInfo(Path backupRootPath, String backupId, FileSystem fs)
    throws IOException {
    Path backupPath = new Path(backupRootPath, backupId);

    RemoteIterator<LocatedFileStatus> it = fs.listFiles(backupPath, true);
    while (it.hasNext()) {
      LocatedFileStatus lfs = it.next();
      if (lfs.getPath().getName().equals(BackupManifest.MANIFEST_FILE_NAME)) {
        // Load BackupManifest
        BackupManifest manifest = new BackupManifest(fs, lfs.getPath().getParent());
        BackupInfo info = manifest.toBackupInfo();
        return info;
      }
    }
    return null;
  }

  /**
   * Create restore request.
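   * <p>
   * A minimal usage sketch (root dir, backup id and table name here are hypothetical):
   *
   * <pre>{@code
   * RestoreRequest req = BackupUtils.createRestoreRequest("hdfs://host:9000/backup_root",
   *   "backup_1396650096738", false, new TableName[] { TableName.valueOf("t1") }, null, true);
   * }</pre>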
   * @param backupRootDir backup root dir
   * @param backupId      backup id
   * @param check         check only
   * @param fromTables    table list from
   * @param toTables      table list to
   * @param isOverwrite   overwrite data
   * @return request object
   */
  public static RestoreRequest createRestoreRequest(String backupRootDir, String backupId,
    boolean check, TableName[] fromTables, TableName[] toTables, boolean isOverwrite) {
    RestoreRequest.Builder builder = new RestoreRequest.Builder();
    RestoreRequest request =
      builder.withBackupRootDir(backupRootDir).withBackupId(backupId).withCheck(check)
        .withFromTables(fromTables).withToTables(toTables).withOvewrite(isOverwrite).build();
    return request;
  }

  public static boolean validate(HashMap<TableName, BackupManifest> backupManifestMap,
    Configuration conf) throws IOException {
    boolean isValid = true;

    for (Entry<TableName, BackupManifest> manifestEntry : backupManifestMap.entrySet()) {
      TableName table = manifestEntry.getKey();
      TreeSet<BackupImage> imageSet = new TreeSet<>();

      ArrayList<BackupImage> depList = manifestEntry.getValue().getDependentListByTable(table);
      if (depList != null && !depList.isEmpty()) {
        imageSet.addAll(depList);
      }

      LOG.info("Dependent image(s) from old to new:");
      for (BackupImage image : imageSet) {
        String imageDir =
          HBackupFileSystem.getTableBackupDir(image.getRootDir(), image.getBackupId(), table);
        if (!BackupUtils.checkPathExist(imageDir, conf)) {
          LOG.error("ERROR: backup image does not exist: " + imageDir);
          isValid = false;
          break;
        }
        LOG.info("Backup image: " + image.getBackupId() + " for '" + table + "' is available");
      }
    }
    return isValid;
  }

  public static Path getBulkOutputDir(Path restoreRootDir, String tableName, Configuration conf,
    boolean deleteOnExit) throws IOException {
    FileSystem fs = restoreRootDir.getFileSystem(conf);
    Path path = new Path(restoreRootDir,
      "bulk_output-" + tableName + "-" + EnvironmentEdgeManager.currentTime());
    if (deleteOnExit) {
      fs.deleteOnExit(path);
    }
    return path;
  }

  public static Path getBulkOutputDir(Path restoreRootDir, String tableName, Configuration conf)
    throws IOException {
    return getBulkOutputDir(restoreRootDir, tableName, conf, true);
  }

  public static Path getBulkOutputDir(String tableName, Configuration conf, boolean deleteOnExit)
    throws IOException {
    FileSystem fs = FileSystem.get(conf);
    return getBulkOutputDir(getTmpRestoreOutputDir(fs, conf), tableName, conf, deleteOnExit);
  }

  /**
   * Build temporary output path
   * @param fs   filesystem for default output dir
   * @param conf configuration
   * @return output path
   */
  public static Path getTmpRestoreOutputDir(FileSystem fs, Configuration conf) {
    String tmp =
      conf.get(HConstants.TEMPORARY_FS_DIRECTORY_KEY, fs.getHomeDirectory() + "/hbase-staging");
    return new Path(tmp);
  }

  public static String getFileNameCompatibleString(TableName table) {
    return table.getNamespaceAsString() + "-" + table.getQualifierAsString();
  }

  public static boolean failed(int result) {
    return result != 0;
  }

  public static boolean succeeded(int result) {
    return result == 0;
  }

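  /**
   * Creates a {@link BulkLoadHFiles} loader configured for restore: the RPC timeout is raised to
   * one hour, the per-region-per-family file limit is lifted, and unmatched column families are
   * ignored.
   * <p>
   * A minimal usage sketch (the table name and HFile directory are hypothetical, and the exact
   * {@code bulkLoad} overloads may differ between HBase versions):
   *
   * <pre>{@code
   * BulkLoadHFiles loader = BackupUtils.createLoader(conf);
   * loader.bulkLoad(TableName.valueOf("t1"), new Path("/tmp/bulk_output-t1"));
   * }</pre>
   */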
  public static BulkLoadHFiles createLoader(Configuration config) {
    // Set configuration for restore: bulk loading (LoadIncrementalHFiles) needs more time,
    // so raise hbase.rpc.timeout to one hour.
    Configuration conf = new Configuration(config);
    conf.setInt(HConstants.HBASE_RPC_TIMEOUT_KEY, MILLISEC_IN_HOUR);

    // By default, it is 32 and loader will fail if # of files in any region exceed this
    // limit. Bad for snapshot restore.
    conf.setInt(BulkLoadHFiles.MAX_FILES_PER_REGION_PER_FAMILY, Integer.MAX_VALUE);
    conf.set(BulkLoadHFiles.IGNORE_UNMATCHED_CF_CONF_KEY, "yes");
    return BulkLoadHFiles.create(conf);
  }

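  /**
   * Returns the backup id with the most recent timestamp among the given ids. For example
   * (hypothetical ids), given {@code ["backup_100", "backup_200"]} this returns
   * {@code "backup_200"}, assuming {@link BackupRestoreConstants#BACKUPID_PREFIX} is
   * {@code "backup_"}.
   */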
  public static String findMostRecentBackupId(String[] backupIds) {
    long recentTimestamp = Long.MIN_VALUE;
    for (String backupId : backupIds) {
      long ts = Long.parseLong(Iterators.get(Splitter.on('_').split(backupId).iterator(), 1));
      if (ts > recentTimestamp) {
        recentTimestamp = ts;
      }
    }
    return BackupRestoreConstants.BACKUPID_PREFIX + recentTimestamp;
  }

}