001/*
002 * Licensed to the Apache Software Foundation (ASF) under one
003 * or more contributor license agreements.  See the NOTICE file
004 * distributed with this work for additional information
005 * regarding copyright ownership.  The ASF licenses this file
006 * to you under the Apache License, Version 2.0 (the
007 * "License"); you may not use this file except in compliance
008 * with the License.  You may obtain a copy of the License at
009 *
010 *     http://www.apache.org/licenses/LICENSE-2.0
011 *
012 * Unless required by applicable law or agreed to in writing, software
013 * distributed under the License is distributed on an "AS IS" BASIS,
014 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
015 * See the License for the specific language governing permissions and
016 * limitations under the License.
017 */
018package org.apache.hadoop.hbase.backup.util;
019
import java.io.FileNotFoundException;
import java.io.IOException;
import java.net.URLDecoder;
import java.nio.charset.StandardCharsets;
import java.util.ArrayList;
import java.util.Collections;
import java.util.Comparator;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Map.Entry;
import java.util.TreeSet;
import java.util.function.Predicate;
import java.util.stream.Collectors;
import java.util.stream.Stream;
034import org.apache.hadoop.conf.Configuration;
035import org.apache.hadoop.fs.FSDataOutputStream;
036import org.apache.hadoop.fs.FileStatus;
037import org.apache.hadoop.fs.FileSystem;
038import org.apache.hadoop.fs.LocatedFileStatus;
039import org.apache.hadoop.fs.Path;
040import org.apache.hadoop.fs.PathFilter;
041import org.apache.hadoop.fs.RemoteIterator;
042import org.apache.hadoop.fs.permission.FsPermission;
043import org.apache.hadoop.hbase.HConstants;
044import org.apache.hadoop.hbase.MetaTableAccessor;
045import org.apache.hadoop.hbase.ServerName;
046import org.apache.hadoop.hbase.TableName;
047import org.apache.hadoop.hbase.backup.BackupInfo;
048import org.apache.hadoop.hbase.backup.BackupRestoreConstants;
049import org.apache.hadoop.hbase.backup.HBackupFileSystem;
050import org.apache.hadoop.hbase.backup.RestoreRequest;
051import org.apache.hadoop.hbase.backup.impl.BackupManifest;
052import org.apache.hadoop.hbase.backup.impl.BackupManifest.BackupImage;
053import org.apache.hadoop.hbase.backup.impl.BackupSystemTable;
054import org.apache.hadoop.hbase.backup.master.LogRollMasterProcedureManager;
055import org.apache.hadoop.hbase.client.Admin;
056import org.apache.hadoop.hbase.client.Connection;
057import org.apache.hadoop.hbase.client.RegionInfo;
058import org.apache.hadoop.hbase.client.TableDescriptor;
059import org.apache.hadoop.hbase.master.region.MasterRegionFactory;
060import org.apache.hadoop.hbase.tool.BulkLoadHFiles;
061import org.apache.hadoop.hbase.util.CommonFSUtils;
062import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
063import org.apache.hadoop.hbase.util.FSTableDescriptors;
064import org.apache.hadoop.hbase.util.FSUtils;
065import org.apache.hadoop.hbase.wal.AbstractFSWALProvider;
066import org.apache.yetus.audience.InterfaceAudience;
067import org.slf4j.Logger;
068import org.slf4j.LoggerFactory;
069
070import org.apache.hbase.thirdparty.com.google.common.base.Splitter;
071import org.apache.hbase.thirdparty.com.google.common.collect.ImmutableMap;
072import org.apache.hbase.thirdparty.com.google.common.collect.Iterables;
073import org.apache.hbase.thirdparty.com.google.common.collect.Iterators;
074
075/**
076 * A collection for methods used by multiple classes to backup HBase tables.
077 */
078@InterfaceAudience.Private
079public final class BackupUtils {
080  private static final Logger LOG = LoggerFactory.getLogger(BackupUtils.class);
081  public static final String LOGNAME_SEPARATOR = ".";
082  public static final int MILLISEC_IN_HOUR = 3600000;
083
  // Utility class; the throwing constructor also blocks reflective instantiation.
  private BackupUtils() {
    throw new AssertionError("Instantiating utility class...");
  }
087
088  /**
089   * Loop through the RS log timestamp map for the tables, for each RS, find the min timestamp value
090   * for the RS among the tables.
091   * @param rsLogTimestampMap timestamp map
092   * @return the min timestamp of each RS
093   */
094  public static Map<String, Long>
095    getRSLogTimestampMins(Map<TableName, Map<String, Long>> rsLogTimestampMap) {
096    return rsLogTimestampMap.values().stream().flatMap(map -> map.entrySet().stream())
097      .collect(Collectors.toMap(Entry::getKey, Entry::getValue, Math::min));
098  }
099
100  /**
101   * copy out Table RegionInfo into incremental backup image need to consider move this logic into
102   * HBackupFileSystem
103   * @param conn       connection
104   * @param backupInfo backup info
105   * @param conf       configuration
106   * @throws IOException exception
107   */
108  public static void copyTableRegionInfo(Connection conn, BackupInfo backupInfo, Configuration conf)
109    throws IOException {
110    Path rootDir = CommonFSUtils.getRootDir(conf);
111    FileSystem fs = rootDir.getFileSystem(conf);
112
113    // for each table in the table set, copy out the table info and region
114    // info files in the correct directory structure
115    try (Admin admin = conn.getAdmin()) {
116      for (TableName table : backupInfo.getTables()) {
117        if (!admin.tableExists(table)) {
118          LOG.warn("Table " + table + " does not exists, skipping it.");
119          continue;
120        }
121        TableDescriptor orig = FSTableDescriptors.getTableDescriptorFromFs(fs, rootDir, table);
122
123        // write a copy of descriptor to the target directory
124        Path target = new Path(backupInfo.getTableBackupDir(table));
125        FileSystem targetFs = target.getFileSystem(conf);
126        try (FSTableDescriptors descriptors =
127          new FSTableDescriptors(targetFs, CommonFSUtils.getRootDir(conf))) {
128          descriptors.createTableDescriptorForTableDirectory(target, orig, false);
129        }
130        LOG.debug("Attempting to copy table info for:" + table + " target: " + target
131          + " descriptor: " + orig);
132        LOG.debug("Finished copying tableinfo.");
133        List<RegionInfo> regions = MetaTableAccessor.getTableRegions(conn, table);
134        // For each region, write the region info to disk
135        LOG.debug("Starting to write region info for table " + table);
136        for (RegionInfo regionInfo : regions) {
137          Path regionDir = FSUtils
138            .getRegionDirFromTableDir(new Path(backupInfo.getTableBackupDir(table)), regionInfo);
139          regionDir = new Path(backupInfo.getTableBackupDir(table), regionDir.getName());
140          writeRegioninfoOnFilesystem(conf, targetFs, regionDir, regionInfo);
141        }
142        LOG.debug("Finished writing region info for table " + table);
143      }
144    }
145  }
146
147  /**
148   * Write the .regioninfo file on-disk.
149   */
150  public static void writeRegioninfoOnFilesystem(final Configuration conf, final FileSystem fs,
151    final Path regionInfoDir, RegionInfo regionInfo) throws IOException {
152    final byte[] content = RegionInfo.toDelimitedByteArray(regionInfo);
153    Path regionInfoFile = new Path(regionInfoDir, "." + HConstants.REGIONINFO_QUALIFIER_STR);
154    // First check to get the permissions
155    FsPermission perms = CommonFSUtils.getFilePermissions(fs, conf, HConstants.DATA_FILE_UMASK_KEY);
156    // Write the RegionInfo file content
157    FSDataOutputStream out = FSUtils.create(conf, fs, regionInfoFile, perms, null);
158    try {
159      out.write(content);
160    } finally {
161      out.close();
162    }
163  }
164
165  /**
166   * Parses hostname:port from WAL file path
167   * @param p path to WAL file
168   * @return hostname:port
169   */
170  public static String parseHostNameFromLogFile(Path p) {
171    try {
172      if (AbstractFSWALProvider.isArchivedLogFile(p)) {
173        return BackupUtils.parseHostFromOldLog(p);
174      } else {
175        ServerName sname = AbstractFSWALProvider.getServerNameFromWALDirectoryName(p);
176        if (sname != null) {
177          return sname.getAddress().toString();
178        } else {
179          LOG.error("Skip log file (can't parse): " + p);
180          return null;
181        }
182      }
183    } catch (Exception e) {
184      LOG.error("Skip log file (can't parse): " + p, e);
185      return null;
186    }
187  }
188
189  /**
190   * Returns WAL file name
191   * @param walFileName WAL file name
192   * @return WAL file name
193   */
194  public static String getUniqueWALFileNamePart(String walFileName) {
195    return getUniqueWALFileNamePart(new Path(walFileName));
196  }
197
198  /**
199   * Returns WAL file name
200   * @param p WAL file path
201   * @return WAL file name
202   */
203  public static String getUniqueWALFileNamePart(Path p) {
204    return p.getName();
205  }
206
207  /**
208   * Get the total length of files under the given directory recursively.
209   * @param fs  The hadoop file system
210   * @param dir The target directory
211   * @return the total length of files
212   * @throws IOException exception
213   */
214  public static long getFilesLength(FileSystem fs, Path dir) throws IOException {
215    long totalLength = 0;
216    FileStatus[] files = CommonFSUtils.listStatus(fs, dir);
217    if (files != null) {
218      for (FileStatus fileStatus : files) {
219        if (fileStatus.isDirectory()) {
220          totalLength += getFilesLength(fs, fileStatus.getPath());
221        } else {
222          totalLength += fileStatus.getLen();
223        }
224      }
225    }
226    return totalLength;
227  }
228
229  /**
230   * Get list of all old WAL files (WALs and archive)
231   * @param c                configuration
232   * @param hostTimestampMap {host,timestamp} map
233   * @return list of WAL files
234   * @throws IOException exception
235   */
236  public static List<String> getWALFilesOlderThan(final Configuration c,
237    final HashMap<String, Long> hostTimestampMap) throws IOException {
238    Path walRootDir = CommonFSUtils.getWALRootDir(c);
239    Path logDir = new Path(walRootDir, HConstants.HREGION_LOGDIR_NAME);
240    Path oldLogDir = new Path(walRootDir, HConstants.HREGION_OLDLOGDIR_NAME);
241    List<String> logFiles = new ArrayList<>();
242
243    PathFilter filter = p -> {
244      try {
245        if (AbstractFSWALProvider.isMetaFile(p)) {
246          return false;
247        }
248        String host = parseHostNameFromLogFile(p);
249        if (host == null) {
250          return false;
251        }
252        Long oldTimestamp = hostTimestampMap.get(host);
253        Long currentLogTS = BackupUtils.getCreationTime(p);
254        return currentLogTS <= oldTimestamp;
255      } catch (Exception e) {
256        LOG.warn("Can not parse" + p, e);
257        return false;
258      }
259    };
260    FileSystem walFs = CommonFSUtils.getWALFileSystem(c);
261    logFiles = BackupUtils.getFiles(walFs, logDir, logFiles, filter);
262    logFiles = BackupUtils.getFiles(walFs, oldLogDir, logFiles, filter);
263    return logFiles;
264  }
265
266  public static TableName[] parseTableNames(String tables) {
267    if (tables == null) {
268      return null;
269    }
270    return Splitter.on(BackupRestoreConstants.TABLENAME_DELIMITER_IN_COMMAND).splitToStream(tables)
271      .map(TableName::valueOf).toArray(TableName[]::new);
272  }
273
274  /**
275   * Check whether the backup path exist
276   * @param backupStr backup
277   * @param conf      configuration
278   * @return Yes if path exists
279   * @throws IOException exception
280   */
281  public static boolean checkPathExist(String backupStr, Configuration conf) throws IOException {
282    boolean isExist = false;
283    Path backupPath = new Path(backupStr);
284    FileSystem fileSys = backupPath.getFileSystem(conf);
285    String targetFsScheme = fileSys.getUri().getScheme();
286    if (LOG.isTraceEnabled()) {
287      LOG.trace("Schema of given url: " + backupStr + " is: " + targetFsScheme);
288    }
289    if (fileSys.exists(backupPath)) {
290      isExist = true;
291    }
292    return isExist;
293  }
294
295  /**
296   * Check target path first, confirm it doesn't exist before backup
297   * @param backupRootPath backup destination path
298   * @param conf           configuration
299   * @throws IOException exception
300   */
301  public static void checkTargetDir(String backupRootPath, Configuration conf) throws IOException {
302    boolean targetExists;
303    try {
304      targetExists = checkPathExist(backupRootPath, conf);
305    } catch (IOException e) {
306      String expMsg = e.getMessage();
307      String newMsg = null;
308      if (expMsg.contains("No FileSystem for scheme")) {
309        newMsg =
310          "Unsupported filesystem scheme found in the backup target url. Error Message: " + expMsg;
311        LOG.error(newMsg);
312        throw new IOException(newMsg);
313      } else {
314        throw e;
315      }
316    }
317
318    if (targetExists) {
319      LOG.info("Using existing backup root dir: " + backupRootPath);
320    } else {
321      LOG.info("Backup root dir " + backupRootPath + " does not exist. Will be created.");
322    }
323  }
324
325  /**
326   * Parses host name:port from archived WAL path
327   * @param p path
328   * @return host name
329   */
330  public static String parseHostFromOldLog(Path p) {
331    // Skip master wals
332    if (p.getName().endsWith(MasterRegionFactory.ARCHIVED_WAL_SUFFIX)) {
333      return null;
334    }
335    try {
336      String urlDecodedName = URLDecoder.decode(p.getName(), "UTF8");
337      Iterable<String> nameSplitsOnComma = Splitter.on(",").split(urlDecodedName);
338      String host = Iterables.get(nameSplitsOnComma, 0);
339      String port = Iterables.get(nameSplitsOnComma, 1);
340      return host + ":" + port;
341    } catch (Exception e) {
342      LOG.warn("Skip log file (can't parse): {}", p);
343      return null;
344    }
345  }
346
347  /**
348   * Given the log file, parse the timestamp from the file name. The timestamp is the last number.
349   * @param p a path to the log file
350   * @return the timestamp
351   * @throws IOException exception
352   */
353  public static Long getCreationTime(Path p) throws IOException {
354    int idx = p.getName().lastIndexOf(LOGNAME_SEPARATOR);
355    if (idx < 0) {
356      throw new IOException("Cannot parse timestamp from path " + p);
357    }
358    String ts = p.getName().substring(idx + 1);
359    return Long.parseLong(ts);
360  }
361
362  public static List<String> getFiles(FileSystem fs, Path rootDir, List<String> files,
363    PathFilter filter) throws IOException {
364    RemoteIterator<LocatedFileStatus> it = fs.listFiles(rootDir, true);
365
366    while (it.hasNext()) {
367      LocatedFileStatus lfs = it.next();
368      if (lfs.isDirectory()) {
369        continue;
370      }
371      // apply filter
372      if (filter.accept(lfs.getPath())) {
373        files.add(lfs.getPath().toString());
374      }
375    }
376    return files;
377  }
378
  /**
   * Removes all data written for a backup: the DistCp WAL staging directories and the per-table
   * target directories.
   * @param context backup whose data is removed
   * @param conf    configuration
   * @throws IOException if cleaning the WAL staging directory fails
   */
  public static void cleanupBackupData(BackupInfo context, Configuration conf) throws IOException {
    cleanupHLogDir(context, conf);
    cleanupTargetDir(context, conf);
  }
383
384  /**
385   * Clean up directories which are generated when DistCp copying hlogs
386   * @param backupInfo backup info
387   * @param conf       configuration
388   * @throws IOException exception
389   */
390  private static void cleanupHLogDir(BackupInfo backupInfo, Configuration conf) throws IOException {
391    String logDir = backupInfo.getHLogTargetDir();
392    if (logDir == null) {
393      LOG.warn("No log directory specified for " + backupInfo.getBackupId());
394      return;
395    }
396
397    Path rootPath = new Path(logDir).getParent();
398    FileSystem fs = FileSystem.get(rootPath.toUri(), conf);
399    FileStatus[] files = listStatus(fs, rootPath, null);
400    if (files == null) {
401      return;
402    }
403    for (FileStatus file : files) {
404      LOG.debug("Delete log files: " + file.getPath().getName());
405      fs.delete(file.getPath(), true);
406    }
407  }
408
409  private static void cleanupTargetDir(BackupInfo backupInfo, Configuration conf) {
410    try {
411      // clean up the data at target directory
412      LOG.debug("Trying to cleanup up target dir : " + backupInfo.getBackupId());
413      String targetDir = backupInfo.getBackupRootDir();
414      if (targetDir == null) {
415        LOG.warn("No target directory specified for " + backupInfo.getBackupId());
416        return;
417      }
418
419      FileSystem outputFs = FileSystem.get(new Path(backupInfo.getBackupRootDir()).toUri(), conf);
420
421      for (TableName table : backupInfo.getTables()) {
422        Path targetDirPath = new Path(
423          getTableBackupDir(backupInfo.getBackupRootDir(), backupInfo.getBackupId(), table));
424        if (outputFs.delete(targetDirPath, true)) {
425          LOG.info("Cleaning up backup data at " + targetDirPath.toString() + " done.");
426        } else {
427          LOG.info("No data has been found in " + targetDirPath.toString() + ".");
428        }
429
430        Path tableDir = targetDirPath.getParent();
431        FileStatus[] backups = listStatus(outputFs, tableDir, null);
432        if (backups == null || backups.length == 0) {
433          outputFs.delete(tableDir, true);
434          LOG.debug(tableDir.toString() + " is empty, remove it.");
435        }
436      }
437      outputFs.delete(new Path(targetDir, backupInfo.getBackupId()), true);
438    } catch (IOException e1) {
439      LOG.error("Cleaning up backup data of " + backupInfo.getBackupId() + " at "
440        + backupInfo.getBackupRootDir() + " failed due to " + e1.getMessage() + ".");
441    }
442  }
443
444  /**
445   * Given the backup root dir, backup id and the table name, return the backup image location,
446   * which is also where the backup manifest file is. return value look like:
447   * "hdfs://backup.hbase.org:9000/user/biadmin/backup1/backup_1396650096738/default/t1_dn/"
448   * @param backupRootDir backup root directory
449   * @param backupId      backup id
450   * @param tableName     table name
451   * @return backupPath String for the particular table
452   */
453  public static String getTableBackupDir(String backupRootDir, String backupId,
454    TableName tableName) {
455    return backupRootDir + Path.SEPARATOR + backupId + Path.SEPARATOR
456      + tableName.getNamespaceAsString() + Path.SEPARATOR + tableName.getQualifierAsString()
457      + Path.SEPARATOR;
458  }
459
460  /**
461   * Calls fs.listStatus() and treats FileNotFoundException as non-fatal This accommodates
462   * differences between hadoop versions, where hadoop 1 does not throw a FileNotFoundException, and
463   * return an empty FileStatus[] while Hadoop 2 will throw FileNotFoundException.
464   * @param fs     file system
465   * @param dir    directory
466   * @param filter path filter
467   * @return null if dir is empty or doesn't exist, otherwise FileStatus array
468   */
469  public static FileStatus[] listStatus(final FileSystem fs, final Path dir,
470    final PathFilter filter) throws IOException {
471    FileStatus[] status = null;
472    try {
473      status = filter == null ? fs.listStatus(dir) : fs.listStatus(dir, filter);
474    } catch (FileNotFoundException fnfe) {
475      // if directory doesn't exist, return null
476      if (LOG.isTraceEnabled()) {
477        LOG.trace(dir + " doesn't exist");
478      }
479    }
480
481    if (status == null || status.length < 1) {
482      return null;
483    }
484
485    return status;
486  }
487
488  /**
489   * Return the 'path' component of a Path. In Hadoop, Path is a URI. This method returns the 'path'
490   * component of a Path's URI: e.g. If a Path is
491   * <code>hdfs://example.org:9000/hbase_trunk/TestTable/compaction.dir</code>, this method returns
492   * <code>/hbase_trunk/TestTable/compaction.dir</code>. This method is useful if you want to print
493   * out a Path without qualifying Filesystem instance.
494   * @param p file system Path whose 'path' component we are to return.
495   * @return Path portion of the Filesystem
496   */
497  public static String getPath(Path p) {
498    return p.toUri().getPath();
499  }
500
501  /**
502   * Given the backup root dir and the backup id, return the log file location for an incremental
503   * backup.
504   * @param backupRootDir backup root directory
505   * @param backupId      backup id
506   * @return logBackupDir: ".../user/biadmin/backup1/WALs/backup_1396650096738"
507   */
508  public static String getLogBackupDir(String backupRootDir, String backupId) {
509    return backupRootDir + Path.SEPARATOR + backupId + Path.SEPARATOR
510      + HConstants.HREGION_LOGDIR_NAME;
511  }
512
513  /**
514   * Loads all backup history as stored in files on the given backup root path.
515   * @return all backup history, from newest (most recent) to oldest (least recent)
516   */
517  private static List<BackupInfo> getHistory(Configuration conf, Path backupRootPath)
518    throws IOException {
519    // Get all (n) history from backup root destination
520
521    FileSystem fs = FileSystem.get(backupRootPath.toUri(), conf);
522    RemoteIterator<LocatedFileStatus> it;
523    try {
524      it = fs.listLocatedStatus(backupRootPath);
525    } catch (FileNotFoundException e) {
526      return Collections.emptyList();
527    }
528
529    List<BackupInfo> infos = new ArrayList<>();
530    while (it.hasNext()) {
531      LocatedFileStatus lfs = it.next();
532
533      if (!lfs.isDirectory()) {
534        continue;
535      }
536
537      String backupId = lfs.getPath().getName();
538      try {
539        BackupInfo info = loadBackupInfo(backupRootPath, backupId, fs);
540        infos.add(info);
541      } catch (IOException e) {
542        LOG.error("Can not load backup info from: " + lfs.getPath(), e);
543      }
544    }
545    // Sort
546    infos.sort(Comparator.<BackupInfo> naturalOrder().reversed());
547    return infos;
548  }
549
550  /**
551   * Loads all backup history as stored in files on the given backup root path, and returns the
552   * first n entries matching all given filters.
553   * @return (subset of) backup history, from newest (most recent) to oldest (least recent)
554   */
555  public static List<BackupInfo> getHistory(Configuration conf, int n, Path backupRootPath,
556    BackupInfo.Filter... filters) throws IOException {
557    List<BackupInfo> infos = getHistory(conf, backupRootPath);
558
559    Predicate<BackupInfo> combinedPredicate = Stream.of(filters)
560      .map(filter -> (Predicate<BackupInfo>) filter).reduce(Predicate::and).orElse(x -> true);
561
562    return infos.stream().filter(combinedPredicate).limit(n).toList();
563  }
564
565  public static BackupInfo loadBackupInfo(Path backupRootPath, String backupId, FileSystem fs)
566    throws IOException {
567    Path backupPath = new Path(backupRootPath, backupId);
568
569    RemoteIterator<LocatedFileStatus> it = fs.listFiles(backupPath, true);
570    while (it.hasNext()) {
571      LocatedFileStatus lfs = it.next();
572      if (lfs.getPath().getName().equals(BackupManifest.MANIFEST_FILE_NAME)) {
573        // Load BackupManifest
574        BackupManifest manifest = new BackupManifest(fs, lfs.getPath().getParent());
575        BackupInfo info = manifest.toBackupInfo();
576        return info;
577      }
578    }
579    return null;
580  }
581
582  /**
583   * Create restore request.
584   * @param backupRootDir backup root dir
585   * @param backupId      backup id
586   * @param check         check only
587   * @param fromTables    table list from
588   * @param toTables      table list to
589   * @param isOverwrite   overwrite data
590   * @return request obkect
591   */
592  public static RestoreRequest createRestoreRequest(String backupRootDir, String backupId,
593    boolean check, TableName[] fromTables, TableName[] toTables, boolean isOverwrite) {
594    return createRestoreRequest(backupRootDir, backupId, check, fromTables, toTables, isOverwrite,
595      false);
596  }
597
598  public static RestoreRequest createRestoreRequest(String backupRootDir, String backupId,
599    boolean check, TableName[] fromTables, TableName[] toTables, boolean isOverwrite,
600    boolean isKeepOriginalSplits) {
601    RestoreRequest.Builder builder = new RestoreRequest.Builder();
602    RestoreRequest request = builder.withBackupRootDir(backupRootDir).withBackupId(backupId)
603      .withCheck(check).withFromTables(fromTables).withToTables(toTables).withOverwrite(isOverwrite)
604      .withKeepOriginalSplits(isKeepOriginalSplits).build();
605    return request;
606  }
607
608  public static boolean validate(List<TableName> tables, BackupManifest backupManifest,
609    Configuration conf) throws IOException {
610    boolean isValid = true;
611
612    for (TableName table : tables) {
613      TreeSet<BackupImage> imageSet = new TreeSet<>();
614
615      ArrayList<BackupImage> depList = backupManifest.getDependentListByTable(table);
616      if (depList != null && !depList.isEmpty()) {
617        imageSet.addAll(depList);
618      }
619
620      LOG.info("Dependent image(s) from old to new:");
621      for (BackupImage image : imageSet) {
622        String imageDir =
623          HBackupFileSystem.getTableBackupDir(image.getRootDir(), image.getBackupId(), table);
624        if (!BackupUtils.checkPathExist(imageDir, conf)) {
625          LOG.error("ERROR: backup image does not exist: " + imageDir);
626          isValid = false;
627          break;
628        }
629        LOG.info("Backup image: " + image.getBackupId() + " for '" + table + "' is available");
630      }
631    }
632    return isValid;
633  }
634
635  public static Path getBulkOutputDir(Path restoreRootDir, String tableName, Configuration conf,
636    boolean deleteOnExit) throws IOException {
637    FileSystem fs = restoreRootDir.getFileSystem(conf);
638    Path path = new Path(restoreRootDir,
639      "bulk_output-" + tableName + "-" + EnvironmentEdgeManager.currentTime());
640    if (deleteOnExit) {
641      fs.deleteOnExit(path);
642    }
643    return path;
644  }
645
646  public static Path getBulkOutputDir(Path restoreRootDir, String tableName, Configuration conf)
647    throws IOException {
648    return getBulkOutputDir(restoreRootDir, tableName, conf, true);
649  }
650
651  public static Path getBulkOutputDir(String tableName, Configuration conf, boolean deleteOnExit)
652    throws IOException {
653    FileSystem fs = FileSystem.get(conf);
654    return getBulkOutputDir(getTmpRestoreOutputDir(fs, conf), tableName, conf, deleteOnExit);
655  }
656
657  /**
658   * Build temporary output path
659   * @param fs   filesystem for default output dir
660   * @param conf configuration
661   * @return output path
662   */
663  public static Path getTmpRestoreOutputDir(FileSystem fs, Configuration conf) {
664    String tmp =
665      conf.get(HConstants.TEMPORARY_FS_DIRECTORY_KEY, fs.getHomeDirectory() + "/hbase-staging");
666    return new Path(tmp);
667  }
668
669  public static String getFileNameCompatibleString(TableName table) {
670    return table.getNamespaceAsString() + "-" + table.getQualifierAsString();
671  }
672
673  public static boolean failed(int result) {
674    return result != 0;
675  }
676
677  public static boolean succeeded(int result) {
678    return result == 0;
679  }
680
681  public static BulkLoadHFiles createLoader(Configuration config) {
682    // set configuration for restore:
683    // LoadIncrementalHFile needs more time
684    // <name>hbase.rpc.timeout</name> <value>600000</value>
685    // calculates
686    Configuration conf = new Configuration(config);
687    conf.setInt(HConstants.HBASE_RPC_TIMEOUT_KEY, MILLISEC_IN_HOUR);
688
689    // By default, it is 32 and loader will fail if # of files in any region exceed this
690    // limit. Bad for snapshot restore.
691    conf.setInt(BulkLoadHFiles.MAX_FILES_PER_REGION_PER_FAMILY, Integer.MAX_VALUE);
692    conf.set(BulkLoadHFiles.IGNORE_UNMATCHED_CF_CONF_KEY, "yes");
693    return BulkLoadHFiles.create(conf);
694  }
695
696  public static String findMostRecentBackupId(String[] backupIds) {
697    long recentTimestamp = Long.MIN_VALUE;
698    for (String backupId : backupIds) {
699      long ts = Long.parseLong(Iterators.get(Splitter.on('_').split(backupId).iterator(), 1));
700      if (ts > recentTimestamp) {
701        recentTimestamp = ts;
702      }
703    }
704    return BackupRestoreConstants.BACKUPID_PREFIX + recentTimestamp;
705  }
706
707  /**
708   * roll WAL writer for all region servers and record the newest log roll result
709   */
710  public static void logRoll(Connection conn, String backupRootDir, Configuration conf)
711    throws IOException {
712    boolean legacy = conf.getBoolean("hbase.backup.logroll.legacy.used", false);
713    if (legacy) {
714      logRollV1(conn, backupRootDir);
715    } else {
716      logRollV2(conn, backupRootDir);
717    }
718  }
719
720  private static void logRollV1(Connection conn, String backupRootDir) throws IOException {
721    try (Admin admin = conn.getAdmin()) {
722      admin.execProcedure(LogRollMasterProcedureManager.ROLLLOG_PROCEDURE_SIGNATURE,
723        LogRollMasterProcedureManager.ROLLLOG_PROCEDURE_NAME,
724        ImmutableMap.of("backupRoot", backupRootDir));
725    }
726  }
727
  /**
   * Rolls WAL writers on all region servers via the admin API and records, per server, the
   * highest WAL file number reported by the roll into the backup system table.
   * @param conn          cluster connection
   * @param backupRootDir backup root the roll results are recorded under
   * @throws IOException if rolling or persisting the results fails
   */
  private static void logRollV2(Connection conn, String backupRootDir) throws IOException {
    // NOTE(review): BackupSystemTable is not closed here — confirm whether it holds resources
    // that should be released when this method returns.
    BackupSystemTable backupSystemTable = new BackupSystemTable(conn);
    HashMap<String, Long> lastLogRollResult =
      backupSystemTable.readRegionServerLastLogRollResult(backupRootDir);
    try (Admin admin = conn.getAdmin()) {
      Map<ServerName, Long> newLogRollResult = admin.rollAllWALWriters();

      for (Map.Entry<ServerName, Long> entry : newLogRollResult.entrySet()) {
        ServerName serverName = entry.getKey();
        long newHighestWALFilenum = entry.getValue();

        String address = serverName.getAddress().toString();
        Long lastHighestWALFilenum = lastLogRollResult.get(address);
        // Never move the recorded high-water mark backwards for a server.
        if (lastHighestWALFilenum != null && lastHighestWALFilenum > newHighestWALFilenum) {
          LOG.warn("Won't update last roll log result for server {}: current = {}, new = {}",
            serverName, lastHighestWALFilenum, newHighestWALFilenum);
        } else {
          backupSystemTable.writeRegionServerLastLogRollResult(address, newHighestWALFilenum,
            backupRootDir);
          if (LOG.isDebugEnabled()) {
            LOG.debug("updated last roll log result for {} from {} to {}", serverName,
              lastHighestWALFilenum, newHighestWALFilenum);
          }
        }
      }
    }
  }
755}