/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.backup.util;

import java.io.FileNotFoundException;
import java.io.IOException;
import java.net.URLDecoder;
import java.util.ArrayList;
import java.util.Collections;
import java.util.Comparator;
import java.util.HashMap;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.Map.Entry;
import java.util.TreeMap;
import java.util.TreeSet;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.LocatedFileStatus;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.PathFilter;
import org.apache.hadoop.fs.RemoteIterator;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.MetaTableAccessor;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.backup.BackupInfo;
import org.apache.hadoop.hbase.backup.BackupRestoreConstants;
import org.apache.hadoop.hbase.backup.HBackupFileSystem;
import org.apache.hadoop.hbase.backup.RestoreRequest;
import org.apache.hadoop.hbase.backup.impl.BackupManifest;
import org.apache.hadoop.hbase.backup.impl.BackupManifest.BackupImage;
import org.apache.hadoop.hbase.backup.impl.BackupSystemTable;
import org.apache.hadoop.hbase.backup.master.LogRollMasterProcedureManager;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.RegionInfo;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.master.region.MasterRegionFactory;
import org.apache.hadoop.hbase.tool.BulkLoadHFiles;
import org.apache.hadoop.hbase.util.CommonFSUtils;
import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
import org.apache.hadoop.hbase.util.FSTableDescriptors;
import org.apache.hadoop.hbase.util.FSUtils;
import org.apache.hadoop.hbase.wal.AbstractFSWALProvider;
import org.apache.yetus.audience.InterfaceAudience;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import org.apache.hbase.thirdparty.com.google.common.base.Splitter;
import org.apache.hbase.thirdparty.com.google.common.collect.ImmutableMap;
import org.apache.hbase.thirdparty.com.google.common.collect.Iterables;
import org.apache.hbase.thirdparty.com.google.common.collect.Iterators;

/**
 * A collection of methods used by multiple classes to back up HBase tables.
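 * <p>
 * Example usage (illustrative; the backup root path is hypothetical and a {@code Configuration}
 * named {@code conf} is assumed to be in scope):
 *
 * <pre>{@code
 * // List up to ten of the most recent completed backups under a backup root directory
 * Path backupRoot = new Path("hdfs://namenode:8020/backup");
 * List<BackupInfo> recent = BackupUtils.getHistory(conf, 10, backupRoot,
 *   info -> info.getState() == BackupInfo.BackupState.COMPLETE);
 * }</pre>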
 */
@InterfaceAudience.Private
public final class BackupUtils {
  private static final Logger LOG = LoggerFactory.getLogger(BackupUtils.class);
  public static final String LOGNAME_SEPARATOR = ".";
  public static final int MILLISEC_IN_HOUR = 3600000;

  private BackupUtils() {
    throw new AssertionError("Instantiating utility class...");
  }

  /**
   * Loop through the RS log timestamp map for the tables; for each RS, find the min timestamp
   * value for that RS across the tables.
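   * <p>
   * For example (hypothetical timestamps), an input of
   * <code>{t1 -> {rs1 -> 100, rs2 -> 300}, t2 -> {rs1 -> 200}}</code> yields
   * <code>{rs1 -> 100, rs2 -> 300}</code>.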
   * @param rsLogTimestampMap timestamp map
   * @return the min timestamp of each RS
   */
  public static Map<String, Long>
    getRSLogTimestampMins(Map<TableName, Map<String, Long>> rsLogTimestampMap) {
    if (rsLogTimestampMap == null || rsLogTimestampMap.isEmpty()) {
      return null;
    }

    HashMap<String, Long> rsLogTimestampMins = new HashMap<>();
    HashMap<String, HashMap<TableName, Long>> rsLogTimestampMapByRS = new HashMap<>();

    // Invert the map: group the per-table timestamps by region server
    for (Entry<TableName, Map<String, Long>> tableEntry : rsLogTimestampMap.entrySet()) {
      TableName table = tableEntry.getKey();
      Map<String, Long> rsLogTimestamp = tableEntry.getValue();
      for (Entry<String, Long> rsEntry : rsLogTimestamp.entrySet()) {
        String rs = rsEntry.getKey();
        Long ts = rsEntry.getValue();
        rsLogTimestampMapByRS.putIfAbsent(rs, new HashMap<>());
        rsLogTimestampMapByRS.get(rs).put(table, ts);
      }
    }

    // For each region server, keep the minimum timestamp across all tables
    for (Entry<String, HashMap<TableName, Long>> entry : rsLogTimestampMapByRS.entrySet()) {
      String rs = entry.getKey();
      rsLogTimestampMins.put(rs, BackupUtils.getMinValue(entry.getValue()));
    }

    return rsLogTimestampMins;
  }

  /**
   * Copy out Table RegionInfo into the incremental backup image. TODO: consider moving this logic
   * into HBackupFileSystem.
   * @param conn       connection
   * @param backupInfo backup info
   * @param conf       configuration
   * @throws IOException exception
   */
  public static void copyTableRegionInfo(Connection conn, BackupInfo backupInfo, Configuration conf)
    throws IOException {
    Path rootDir = CommonFSUtils.getRootDir(conf);
    FileSystem fs = rootDir.getFileSystem(conf);

    // for each table in the table set, copy out the table info and region
    // info files in the correct directory structure
    try (Admin admin = conn.getAdmin()) {
      for (TableName table : backupInfo.getTables()) {
        if (!admin.tableExists(table)) {
          LOG.warn("Table " + table + " does not exist, skipping it.");
          continue;
        }
        TableDescriptor orig = FSTableDescriptors.getTableDescriptorFromFs(fs, rootDir, table);

        // write a copy of descriptor to the target directory
        Path target = new Path(backupInfo.getTableBackupDir(table));
        FileSystem targetFs = target.getFileSystem(conf);
        try (FSTableDescriptors descriptors =
          new FSTableDescriptors(targetFs, CommonFSUtils.getRootDir(conf))) {
          descriptors.createTableDescriptorForTableDirectory(target, orig, false);
        }
        LOG.debug("Attempting to copy table info for: " + table + " target: " + target
          + " descriptor: " + orig);
        LOG.debug("Finished copying tableinfo.");
        List<RegionInfo> regions = MetaTableAccessor.getTableRegions(conn, table);
        // For each region, write the region info to disk
        LOG.debug("Starting to write region info for table " + table);
        for (RegionInfo regionInfo : regions) {
          Path regionDir = FSUtils
            .getRegionDirFromTableDir(new Path(backupInfo.getTableBackupDir(table)), regionInfo);
          regionDir = new Path(backupInfo.getTableBackupDir(table), regionDir.getName());
          writeRegioninfoOnFilesystem(conf, targetFs, regionDir, regionInfo);
        }
        LOG.debug("Finished writing region info for table " + table);
      }
    }
  }

  /**
   * Write the .regioninfo file on-disk.
   */
  public static void writeRegioninfoOnFilesystem(final Configuration conf, final FileSystem fs,
    final Path regionInfoDir, RegionInfo regionInfo) throws IOException {
    final byte[] content = RegionInfo.toDelimitedByteArray(regionInfo);
    Path regionInfoFile = new Path(regionInfoDir, "." + HConstants.REGIONINFO_QUALIFIER_STR);
    // First check to get the permissions
    FsPermission perms = CommonFSUtils.getFilePermissions(fs, conf, HConstants.DATA_FILE_UMASK_KEY);
    // Write the RegionInfo file content
    FSDataOutputStream out = FSUtils.create(conf, fs, regionInfoFile, perms, null);
    try {
      out.write(content);
    } finally {
      out.close();
    }
  }

  /**
   * Parses hostname:port from WAL file path
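   * <p>
   * For example (illustrative server name), a WAL under
   * <code>.../WALs/rs1.example.com,16020,1588888888888/...</code> resolves to
   * <code>rs1.example.com:16020</code>.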
   * @param p path to WAL file
   * @return hostname:port, or null if the path cannot be parsed
   */
  public static String parseHostNameFromLogFile(Path p) {
    try {
      if (AbstractFSWALProvider.isArchivedLogFile(p)) {
        return BackupUtils.parseHostFromOldLog(p);
      } else {
        ServerName sname = AbstractFSWALProvider.getServerNameFromWALDirectoryName(p);
        if (sname != null) {
          return sname.getAddress().toString();
        } else {
          LOG.error("Skip log file (can't parse): " + p);
          return null;
        }
      }
    } catch (Exception e) {
      LOG.error("Skip log file (can't parse): " + p, e);
      return null;
    }
  }

  /**
   * Returns the unique part of a WAL file name (its last path component).
   * @param walFileName WAL file name
   * @return the WAL file name without its parent directories
   */
  public static String getUniqueWALFileNamePart(String walFileName) {
    return getUniqueWALFileNamePart(new Path(walFileName));
  }

  /**
   * Returns the unique part of a WAL file name (its last path component).
   * @param p WAL file path
   * @return the WAL file name without its parent directories
   */
  public static String getUniqueWALFileNamePart(Path p) {
    return p.getName();
  }

  /**
   * Get the total length of files under the given directory recursively.
   * @param fs  The hadoop file system
   * @param dir The target directory
   * @return the total length of files
   * @throws IOException exception
   */
  public static long getFilesLength(FileSystem fs, Path dir) throws IOException {
    long totalLength = 0;
    FileStatus[] files = CommonFSUtils.listStatus(fs, dir);
    if (files != null) {
      for (FileStatus fileStatus : files) {
        if (fileStatus.isDirectory()) {
          totalLength += getFilesLength(fs, fileStatus.getPath());
        } else {
          totalLength += fileStatus.getLen();
        }
      }
    }
    return totalLength;
  }

  /**
   * Get the list of all old WAL files (WALs and archive)
   * @param c                configuration
   * @param hostTimestampMap {host,timestamp} map
   * @return list of WAL files
   * @throws IOException exception
   */
  public static List<String> getWALFilesOlderThan(final Configuration c,
    final HashMap<String, Long> hostTimestampMap) throws IOException {
    Path walRootDir = CommonFSUtils.getWALRootDir(c);
    Path logDir = new Path(walRootDir, HConstants.HREGION_LOGDIR_NAME);
    Path oldLogDir = new Path(walRootDir, HConstants.HREGION_OLDLOGDIR_NAME);
    List<String> logFiles = new ArrayList<>();

    PathFilter filter = p -> {
      try {
        if (AbstractFSWALProvider.isMetaFile(p)) {
          return false;
        }
        String host = parseHostNameFromLogFile(p);
        if (host == null) {
          return false;
        }
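        // Keep the WAL only if it was created no later than the timestamp recorded for its host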
        Long oldTimestamp = hostTimestampMap.get(host);
        Long currentLogTS = BackupUtils.getCreationTime(p);
        return currentLogTS <= oldTimestamp;
      } catch (Exception e) {
        LOG.warn("Cannot parse " + p, e);
        return false;
      }
    };
    FileSystem walFs = CommonFSUtils.getWALFileSystem(c);
    logFiles = BackupUtils.getFiles(walFs, logDir, logFiles, filter);
    logFiles = BackupUtils.getFiles(walFs, oldLogDir, logFiles, filter);
    return logFiles;
  }

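  /**
   * Parse a delimiter-separated list of table names, as passed on the backup/restore command line,
   * into an array of {@link TableName}s; returns null if the input is null.
   */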
  public static TableName[] parseTableNames(String tables) {
    if (tables == null) {
      return null;
    }
    return Splitter.on(BackupRestoreConstants.TABLENAME_DELIMITER_IN_COMMAND).splitToStream(tables)
      .map(TableName::valueOf).toArray(TableName[]::new);
  }

  /**
   * Check whether the backup path exists
   * @param backupStr backup
   * @param conf      configuration
   * @return true if the path exists
   * @throws IOException exception
   */
  public static boolean checkPathExist(String backupStr, Configuration conf) throws IOException {
    boolean isExist = false;
    Path backupPath = new Path(backupStr);
    FileSystem fileSys = backupPath.getFileSystem(conf);
    String targetFsScheme = fileSys.getUri().getScheme();
    if (LOG.isTraceEnabled()) {
      LOG.trace("Scheme of given url: " + backupStr + " is: " + targetFsScheme);
    }
    if (fileSys.exists(backupPath)) {
      isExist = true;
    }
    return isExist;
  }

  /**
   * Check the backup target path before the backup and log whether it already exists
   * @param backupRootPath backup destination path
   * @param conf           configuration
   * @throws IOException exception
   */
  public static void checkTargetDir(String backupRootPath, Configuration conf) throws IOException {
    boolean targetExists;
    try {
      targetExists = checkPathExist(backupRootPath, conf);
    } catch (IOException e) {
      String expMsg = e.getMessage();
      String newMsg = null;
      if (expMsg.contains("No FileSystem for scheme")) {
        newMsg =
          "Unsupported filesystem scheme found in the backup target url. Error Message: " + expMsg;
        LOG.error(newMsg);
        throw new IOException(newMsg);
      } else {
        throw e;
      }
    }

    if (targetExists) {
      LOG.info("Using existing backup root dir: " + backupRootPath);
    } else {
      LOG.info("Backup root dir " + backupRootPath + " does not exist. Will be created.");
    }
  }

  /**
   * Get the min value of all the values in a map.
   * @param map map
   * @return the min value
   */
  public static <T> Long getMinValue(Map<T, Long> map) {
    Long minTimestamp = null;
    if (map != null) {
      ArrayList<Long> timestampList = new ArrayList<>(map.values());
      Collections.sort(timestampList);
      // The min among all the RS log timestamps will be kept in the backup system table.
      minTimestamp = timestampList.get(0);
    }
    return minTimestamp;
  }

  /**
   * Parses hostname:port from an archived WAL path
   * @param p path
   * @return hostname:port, or null if the name cannot be parsed
   */
  public static String parseHostFromOldLog(Path p) {
    // Skip master wals
    if (p.getName().endsWith(MasterRegionFactory.ARCHIVED_WAL_SUFFIX)) {
      return null;
    }
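    // Archived WAL names typically look like <host>%2C<port>%2C<startcode>.<timestamp>;
    // after URL-decoding, splitting on ',' yields the host and the port.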
    try {
      String urlDecodedName = URLDecoder.decode(p.getName(), "UTF8");
      Iterable<String> nameSplitsOnComma = Splitter.on(",").split(urlDecodedName);
      String host = Iterables.get(nameSplitsOnComma, 0);
      String port = Iterables.get(nameSplitsOnComma, 1);
      return host + ":" + port;
    } catch (Exception e) {
      LOG.warn("Skip log file (can't parse): {}", p);
      return null;
    }
  }

  /**
   * Given the log file, parse the timestamp from the file name. The timestamp is the last number.
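   * <p>
   * For example (illustrative name), <code>host%2C16020%2C1588888888888.1589000000000</code>
   * yields <code>1589000000000</code>.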
   * @param p a path to the log file
   * @return the timestamp
   * @throws IOException exception
   */
  public static Long getCreationTime(Path p) throws IOException {
    int idx = p.getName().lastIndexOf(LOGNAME_SEPARATOR);
    if (idx < 0) {
      throw new IOException("Cannot parse timestamp from path " + p);
    }
    String ts = p.getName().substring(idx + 1);
    return Long.parseLong(ts);
  }

  public static List<String> getFiles(FileSystem fs, Path rootDir, List<String> files,
    PathFilter filter) throws IOException {
    RemoteIterator<LocatedFileStatus> it = fs.listFiles(rootDir, true);

    while (it.hasNext()) {
      LocatedFileStatus lfs = it.next();
      if (lfs.isDirectory()) {
        continue;
      }
      // apply filter
      if (filter.accept(lfs.getPath())) {
        files.add(lfs.getPath().toString());
      }
    }
    return files;
  }

  public static void cleanupBackupData(BackupInfo context, Configuration conf) throws IOException {
    cleanupHLogDir(context, conf);
    cleanupTargetDir(context, conf);
  }

  /**
   * Clean up directories generated when DistCp copies hlogs
   * @param backupInfo backup info
   * @param conf       configuration
   * @throws IOException exception
   */
  private static void cleanupHLogDir(BackupInfo backupInfo, Configuration conf) throws IOException {
    String logDir = backupInfo.getHLogTargetDir();
    if (logDir == null) {
      LOG.warn("No log directory specified for " + backupInfo.getBackupId());
      return;
    }

    Path rootPath = new Path(logDir).getParent();
    FileSystem fs = FileSystem.get(rootPath.toUri(), conf);
    FileStatus[] files = listStatus(fs, rootPath, null);
    if (files == null) {
      return;
    }
    for (FileStatus file : files) {
      LOG.debug("Delete log files: " + file.getPath().getName());
      fs.delete(file.getPath(), true);
    }
  }

  private static void cleanupTargetDir(BackupInfo backupInfo, Configuration conf) {
    try {
      // clean up the data at target directory
      LOG.debug("Trying to clean up target dir: " + backupInfo.getBackupId());
      String targetDir = backupInfo.getBackupRootDir();
      if (targetDir == null) {
        LOG.warn("No target directory specified for " + backupInfo.getBackupId());
        return;
      }

      FileSystem outputFs = FileSystem.get(new Path(backupInfo.getBackupRootDir()).toUri(), conf);

      for (TableName table : backupInfo.getTables()) {
        Path targetDirPath = new Path(
          getTableBackupDir(backupInfo.getBackupRootDir(), backupInfo.getBackupId(), table));
        if (outputFs.delete(targetDirPath, true)) {
          LOG.info("Cleaning up backup data at " + targetDirPath.toString() + " done.");
        } else {
          LOG.info("No data has been found in " + targetDirPath.toString() + ".");
        }

        Path tableDir = targetDirPath.getParent();
        FileStatus[] backups = listStatus(outputFs, tableDir, null);
        if (backups == null || backups.length == 0) {
          outputFs.delete(tableDir, true);
          LOG.debug(tableDir.toString() + " is empty, remove it.");
        }
      }
      outputFs.delete(new Path(targetDir, backupInfo.getBackupId()), true);
    } catch (IOException e1) {
      LOG.error("Cleaning up backup data of " + backupInfo.getBackupId() + " at "
        + backupInfo.getBackupRootDir() + " failed due to " + e1.getMessage() + ".");
    }
  }

  /**
   * Given the backup root dir, backup id and the table name, return the backup image location,
   * which is also where the backup manifest file is. The return value looks like:
   * "hdfs://backup.hbase.org:9000/user/biadmin/backup1/backup_1396650096738/default/t1_dn/"
   * @param backupRootDir backup root directory
   * @param backupId      backup id
   * @param tableName     table name
   * @return backupPath String for the particular table
   */
  public static String getTableBackupDir(String backupRootDir, String backupId,
    TableName tableName) {
    return backupRootDir + Path.SEPARATOR + backupId + Path.SEPARATOR
      + tableName.getNamespaceAsString() + Path.SEPARATOR + tableName.getQualifierAsString()
      + Path.SEPARATOR;
  }

  /**
   * Sort history list by start time in descending order.
   * @param historyList history list
   * @return sorted list of BackupInfo
   */
  public static ArrayList<BackupInfo> sortHistoryListDesc(ArrayList<BackupInfo> historyList) {
    ArrayList<BackupInfo> list = new ArrayList<>();
    TreeMap<String, BackupInfo> map = new TreeMap<>();
    for (BackupInfo h : historyList) {
      map.put(Long.toString(h.getStartTs()), h);
    }
    Iterator<String> i = map.descendingKeySet().iterator();
    while (i.hasNext()) {
      list.add(map.get(i.next()));
    }
    return list;
  }

  /**
   * Calls fs.listStatus() and treats FileNotFoundException as non-fatal. This accommodates
   * differences between Hadoop versions: Hadoop 1 does not throw a FileNotFoundException and
   * returns an empty FileStatus[], while Hadoop 2 will throw a FileNotFoundException.
   * @param fs     file system
   * @param dir    directory
   * @param filter path filter
   * @return null if dir is empty or doesn't exist, otherwise FileStatus array
   */
  public static FileStatus[] listStatus(final FileSystem fs, final Path dir,
    final PathFilter filter) throws IOException {
    FileStatus[] status = null;
    try {
      status = filter == null ? fs.listStatus(dir) : fs.listStatus(dir, filter);
    } catch (FileNotFoundException fnfe) {
      // if directory doesn't exist, return null
      if (LOG.isTraceEnabled()) {
        LOG.trace(dir + " doesn't exist");
      }
    }

    if (status == null || status.length < 1) {
      return null;
    }

    return status;
  }

  /**
   * Return the 'path' component of a Path. In Hadoop, Path is a URI. This method returns the 'path'
   * component of a Path's URI: e.g. If a Path is
   * <code>hdfs://example.org:9000/hbase_trunk/TestTable/compaction.dir</code>, this method returns
   * <code>/hbase_trunk/TestTable/compaction.dir</code>. This method is useful if you want to print
   * out a Path without the qualifying FileSystem instance.
   * @param p file system Path whose 'path' component we are to return.
   * @return the 'path' portion of the Path's URI
   */
  public static String getPath(Path p) {
    return p.toUri().getPath();
  }

  /**
   * Given the backup root dir and the backup id, return the log file location for an incremental
   * backup.
   * @param backupRootDir backup root directory
   * @param backupId      backup id
   * @return logBackupDir: ".../user/biadmin/backup1/WALs/backup_1396650096738"
   */
  public static String getLogBackupDir(String backupRootDir, String backupId) {
    return backupRootDir + Path.SEPARATOR + backupId + Path.SEPARATOR
      + HConstants.HREGION_LOGDIR_NAME;
  }

  private static List<BackupInfo> getHistory(Configuration conf, Path backupRootPath)
    throws IOException {
    // Get all backup history from the backup root destination

    FileSystem fs = FileSystem.get(backupRootPath.toUri(), conf);
    RemoteIterator<LocatedFileStatus> it = fs.listLocatedStatus(backupRootPath);

    List<BackupInfo> infos = new ArrayList<>();
    while (it.hasNext()) {
      LocatedFileStatus lfs = it.next();

      if (!lfs.isDirectory()) {
        continue;
      }

      String backupId = lfs.getPath().getName();
      try {
        BackupInfo info = loadBackupInfo(backupRootPath, backupId, fs);
        infos.add(info);
      } catch (IOException e) {
        LOG.error("Cannot load backup info from: " + lfs.getPath(), e);
      }
    }
    // Sort descending by the timestamp embedded in the backup id
    Collections.sort(infos, new Comparator<BackupInfo>() {
      @Override
      public int compare(BackupInfo o1, BackupInfo o2) {
        long ts1 = getTimestamp(o1.getBackupId());
        long ts2 = getTimestamp(o2.getBackupId());

        if (ts1 == ts2) {
          return 0;
        }

        return ts1 < ts2 ? 1 : -1;
      }

      private long getTimestamp(String backupId) {
        return Long.parseLong(Iterators.get(Splitter.on('_').split(backupId).iterator(), 1));
      }
    });
    return infos;
  }

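  /**
   * Get up to {@code n} backups from the backup root directory, newest first, that pass all of the
   * supplied filters.
   */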
  public static List<BackupInfo> getHistory(Configuration conf, int n, Path backupRootPath,
    BackupInfo.Filter... filters) throws IOException {
    List<BackupInfo> infos = getHistory(conf, backupRootPath);
    List<BackupInfo> ret = new ArrayList<>();
    for (BackupInfo info : infos) {
      if (ret.size() == n) {
        break;
      }
      boolean passed = true;
      for (int i = 0; i < filters.length; i++) {
        if (!filters[i].apply(info)) {
          passed = false;
          break;
        }
      }
      if (passed) {
        ret.add(info);
      }
    }
    return ret;
  }

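  /**
   * Load the {@link BackupInfo} for the given backup id by locating its backup manifest under the
   * backup root path; returns null if no manifest is found.
   */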
  public static BackupInfo loadBackupInfo(Path backupRootPath, String backupId, FileSystem fs)
    throws IOException {
    Path backupPath = new Path(backupRootPath, backupId);

    RemoteIterator<LocatedFileStatus> it = fs.listFiles(backupPath, true);
    while (it.hasNext()) {
      LocatedFileStatus lfs = it.next();
      if (lfs.getPath().getName().equals(BackupManifest.MANIFEST_FILE_NAME)) {
        // Load BackupManifest
        BackupManifest manifest = new BackupManifest(fs, lfs.getPath().getParent());
        BackupInfo info = manifest.toBackupInfo();
        return info;
      }
    }
    return null;
  }

  /**
   * Create restore request.
   * @param backupRootDir backup root dir
   * @param backupId      backup id
   * @param check         check only
   * @param fromTables    tables to restore from
   * @param toTables      tables to restore to
   * @param isOverwrite   overwrite data
   * @return request object
   */
  public static RestoreRequest createRestoreRequest(String backupRootDir, String backupId,
    boolean check, TableName[] fromTables, TableName[] toTables, boolean isOverwrite) {
    return createRestoreRequest(backupRootDir, backupId, check, fromTables, toTables, isOverwrite,
      false);
  }

  public static RestoreRequest createRestoreRequest(String backupRootDir, String backupId,
    boolean check, TableName[] fromTables, TableName[] toTables, boolean isOverwrite,
    boolean isKeepOriginalSplits) {
    RestoreRequest.Builder builder = new RestoreRequest.Builder();
    RestoreRequest request = builder.withBackupRootDir(backupRootDir).withBackupId(backupId)
      .withCheck(check).withFromTables(fromTables).withToTables(toTables).withOvewrite(isOverwrite)
      .withKeepOriginalSplits(isKeepOriginalSplits).build();
    return request;
  }

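  /**
   * Check that every dependent backup image needed to restore the given tables still exists under
   * its backup root directory.
   */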
  public static boolean validate(List<TableName> tables, BackupManifest backupManifest,
    Configuration conf) throws IOException {
    boolean isValid = true;

    for (TableName table : tables) {
      TreeSet<BackupImage> imageSet = new TreeSet<>();

      ArrayList<BackupImage> depList = backupManifest.getDependentListByTable(table);
      if (depList != null && !depList.isEmpty()) {
        imageSet.addAll(depList);
      }

      LOG.info("Dependent image(s) from old to new:");
      for (BackupImage image : imageSet) {
        String imageDir =
          HBackupFileSystem.getTableBackupDir(image.getRootDir(), image.getBackupId(), table);
        if (!BackupUtils.checkPathExist(imageDir, conf)) {
          LOG.error("ERROR: backup image does not exist: " + imageDir);
          isValid = false;
          break;
        }
        LOG.info("Backup image: " + image.getBackupId() + " for '" + table + "' is available");
      }
    }
    return isValid;
  }

  public static Path getBulkOutputDir(Path restoreRootDir, String tableName, Configuration conf,
    boolean deleteOnExit) throws IOException {
    FileSystem fs = restoreRootDir.getFileSystem(conf);
    Path path = new Path(restoreRootDir,
      "bulk_output-" + tableName + "-" + EnvironmentEdgeManager.currentTime());
    if (deleteOnExit) {
      fs.deleteOnExit(path);
    }
    return path;
  }

  public static Path getBulkOutputDir(Path restoreRootDir, String tableName, Configuration conf)
    throws IOException {
    return getBulkOutputDir(restoreRootDir, tableName, conf, true);
  }

  public static Path getBulkOutputDir(String tableName, Configuration conf, boolean deleteOnExit)
    throws IOException {
    FileSystem fs = FileSystem.get(conf);
    return getBulkOutputDir(getTmpRestoreOutputDir(fs, conf), tableName, conf, deleteOnExit);
  }

  /**
   * Build temporary output path
   * @param fs   filesystem for default output dir
   * @param conf configuration
   * @return output path
   */
  public static Path getTmpRestoreOutputDir(FileSystem fs, Configuration conf) {
    String tmp =
      conf.get(HConstants.TEMPORARY_FS_DIRECTORY_KEY, fs.getHomeDirectory() + "/hbase-staging");
    return new Path(tmp);
  }

  public static String getFileNameCompatibleString(TableName table) {
    return table.getNamespaceAsString() + "-" + table.getQualifierAsString();
  }

  public static boolean failed(int result) {
    return result != 0;
  }

  public static boolean succeeded(int result) {
    return result == 0;
  }

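  /**
   * Create a {@link BulkLoadHFiles} loader configured for restore: a longer RPC timeout and no
   * per-region-per-family file limit. A minimal usage sketch, with a hypothetical table name and
   * output directory:
   *
   * <pre>{@code
   * BulkLoadHFiles loader = BackupUtils.createLoader(conf);
   * loader.bulkLoad(TableName.valueOf("t1"), new Path("/tmp/bulk_output-t1"));
   * }</pre>
   */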
  public static BulkLoadHFiles createLoader(Configuration config) {
    // Set configuration for restore: bulk loading HFiles can take longer than the default
    // hbase.rpc.timeout allows, so raise the timeout (here to one hour).
    Configuration conf = new Configuration(config);
    conf.setInt(HConstants.HBASE_RPC_TIMEOUT_KEY, MILLISEC_IN_HOUR);

    // By default, it is 32 and the loader will fail if # of files in any region exceeds this
    // limit. Bad for snapshot restore.
    conf.setInt(BulkLoadHFiles.MAX_FILES_PER_REGION_PER_FAMILY, Integer.MAX_VALUE);
    conf.set(BulkLoadHFiles.IGNORE_UNMATCHED_CF_CONF_KEY, "yes");
    return BulkLoadHFiles.create(conf);
  }

  public static String findMostRecentBackupId(String[] backupIds) {
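    // Backup ids have the form <prefix><timestamp>, e.g. "backup_1396650096738"; the most
    // recent id is the one with the largest timestamp.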
    long recentTimestamp = Long.MIN_VALUE;
    for (String backupId : backupIds) {
      long ts = Long.parseLong(Iterators.get(Splitter.on('_').split(backupId).iterator(), 1));
      if (ts > recentTimestamp) {
        recentTimestamp = ts;
      }
    }
    return BackupRestoreConstants.BACKUPID_PREFIX + recentTimestamp;
  }

  /**
   * Roll the WAL writers for all region servers and record the newest log roll result
   */
  public static void logRoll(Connection conn, String backupRootDir, Configuration conf)
    throws IOException {
    boolean legacy = conf.getBoolean("hbase.backup.logroll.legacy.used", false);
    if (legacy) {
      logRollV1(conn, backupRootDir);
    } else {
      logRollV2(conn, backupRootDir);
    }
  }

  private static void logRollV1(Connection conn, String backupRootDir) throws IOException {
    try (Admin admin = conn.getAdmin()) {
      admin.execProcedure(LogRollMasterProcedureManager.ROLLLOG_PROCEDURE_SIGNATURE,
        LogRollMasterProcedureManager.ROLLLOG_PROCEDURE_NAME,
        ImmutableMap.of("backupRoot", backupRootDir));
    }
  }

  private static void logRollV2(Connection conn, String backupRootDir) throws IOException {
    BackupSystemTable backupSystemTable = new BackupSystemTable(conn);
    HashMap<String, Long> lastLogRollResult =
      backupSystemTable.readRegionServerLastLogRollResult(backupRootDir);
    try (Admin admin = conn.getAdmin()) {
      Map<ServerName, Long> newLogRollResult = admin.rollAllWALWriters();

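      // Only record results that are at least as new as what is already stored; a smaller WAL
      // file number than the recorded one indicates a stale roll result.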
      for (Map.Entry<ServerName, Long> entry : newLogRollResult.entrySet()) {
        ServerName serverName = entry.getKey();
        long newHighestWALFilenum = entry.getValue();

        String address = serverName.getAddress().toString();
        Long lastHighestWALFilenum = lastLogRollResult.get(address);
        if (lastHighestWALFilenum != null && lastHighestWALFilenum > newHighestWALFilenum) {
          LOG.warn("Won't update last roll log result for server {}: current = {}, new = {}",
            serverName, lastHighestWALFilenum, newHighestWALFilenum);
        } else {
          backupSystemTable.writeRegionServerLastLogRollResult(address, newHighestWALFilenum,
            backupRootDir);
          if (LOG.isDebugEnabled()) {
            LOG.debug("updated last roll log result for {} from {} to {}", serverName,
              lastHighestWALFilenum, newHighestWALFilenum);
          }
        }
      }
    }
  }
}