001/**
002 *
003 * Licensed to the Apache Software Foundation (ASF) under one
004 * or more contributor license agreements.  See the NOTICE file
005 * distributed with this work for additional information
006 * regarding copyright ownership.  The ASF licenses this file
007 * to you under the Apache License, Version 2.0 (the
008 * "License"); you may not use this file except in compliance
009 * with the License.  You may obtain a copy of the License at
010 *
011 *     http://www.apache.org/licenses/LICENSE-2.0
012 *
013 * Unless required by applicable law or agreed to in writing, software
014 * distributed under the License is distributed on an "AS IS" BASIS,
015 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
016 * See the License for the specific language governing permissions and
017 * limitations under the License.
018 */
019package org.apache.hadoop.hbase.util;
020
021import java.io.FileNotFoundException;
022import java.io.IOException;
023import java.lang.reflect.InvocationTargetException;
024import java.lang.reflect.Method;
025import java.net.URI;
026import java.net.URISyntaxException;
027import java.util.List;
028import java.util.Locale;
029import java.util.Map;
030import java.util.Objects;
031import java.util.concurrent.ConcurrentHashMap;
032import org.apache.hadoop.HadoopIllegalArgumentException;
033import org.apache.hadoop.conf.Configuration;
034import org.apache.hadoop.fs.FSDataOutputStream;
035import org.apache.hadoop.fs.FileStatus;
036import org.apache.hadoop.fs.FileSystem;
037import org.apache.hadoop.fs.LocatedFileStatus;
038import org.apache.hadoop.fs.Path;
039import org.apache.hadoop.fs.PathFilter;
040import org.apache.hadoop.fs.RemoteIterator;
041import org.apache.hadoop.fs.permission.FsPermission;
042import org.apache.hadoop.hbase.HConstants;
043import org.apache.hadoop.hbase.TableName;
044import org.apache.hadoop.ipc.RemoteException;
045import org.apache.yetus.audience.InterfaceAudience;
046import org.slf4j.Logger;
047import org.slf4j.LoggerFactory;
048
049import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting;
050import org.apache.hbase.thirdparty.com.google.common.collect.Lists;
051
052/**
053 * Utility methods for interacting with the underlying file system.
054 * <p/>
055 * Note that {@link #setStoragePolicy(FileSystem, Path, String)} is tested in TestFSUtils and
056 * pre-commit will run the hbase-server tests if there's code change in this class. See
057 * <a href="https://issues.apache.org/jira/browse/HBASE-20838">HBASE-20838</a> for more details.
058 */
059@InterfaceAudience.Private
060public abstract class CommonFSUtils {
061  private static final Logger LOG = LoggerFactory.getLogger(CommonFSUtils.class);
062
063  /** Parameter name for HBase WAL directory */
064  public static final String HBASE_WAL_DIR = "hbase.wal.dir";
065
066  /** Parameter to disable stream capability enforcement checks */
  public static final String UNSAFE_STREAM_CAPABILITY_ENFORCE =
      "hbase.unsafe.stream.capability.enforce";
068
069  /** Full access permissions (starting point for a umask) */
070  public static final String FULL_RWX_PERMISSIONS = "777";
071
072  protected CommonFSUtils() {
073    super();
074  }
075
076  /**
   * Compares the path component only. Does not consider the scheme; i.e. if the schemes differ
   * but <code>path</code> starts with <code>rootPath</code>, the function returns true.
080   * @param rootPath value to check for
081   * @param path subject to check
082   * @return True if <code>path</code> starts with <code>rootPath</code>
083   */
084  public static boolean isStartingWithPath(final Path rootPath, final String path) {
085    String uriRootPath = rootPath.toUri().getPath();
086    String tailUriPath = (new Path(path)).toUri().getPath();
087    return tailUriPath.startsWith(uriRootPath);
088  }
089
090  /**
   * Compares the path component of the Path URI; e.g. for hdfs://a/b/c and /a/b/c, it compares
   * the '/a/b/c' part. Does not consider the scheme; i.e. if the schemes differ but the path or
   * subpath matches, the two will equate.
094   * @param pathToSearch Path we will be trying to match against.
095   * @param pathTail what to match
096   * @return True if <code>pathTail</code> is tail on the path of <code>pathToSearch</code>
097   */
098  public static boolean isMatchingTail(final Path pathToSearch, String pathTail) {
099    return isMatchingTail(pathToSearch, new Path(pathTail));
100  }
101
102  /**
   * Compares the path component of the Path URI; e.g. for hdfs://a/b/c and /a/b/c, it compares
   * the '/a/b/c' part. If you passed in hdfs://a/b/c and b/c, it would return true. Does not
   * consider the scheme; i.e. if the schemes differ but the path or subpath matches, the two
   * will equate.
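   * <p>
   * A minimal usage sketch (the paths below are hypothetical):
   * <pre>{@code
   * Path toSearch = new Path("hdfs://namenode:8020/hbase/data/t1");
   * CommonFSUtils.isMatchingTail(toSearch, new Path("/hbase/data/t1")); // true, scheme ignored
   * CommonFSUtils.isMatchingTail(toSearch, new Path("/hbase/data/t2")); // false, names differ
   * }</pre>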
   * @param pathToSearch Path we will be trying to match against
107   * @param pathTail what to match
108   * @return True if <code>pathTail</code> is tail on the path of <code>pathToSearch</code>
109   */
110  public static boolean isMatchingTail(final Path pathToSearch, final Path pathTail) {
111    if (pathToSearch.depth() != pathTail.depth()) {
112      return false;
113    }
114    Path tailPath = pathTail;
115    String tailName;
116    Path toSearch = pathToSearch;
117    String toSearchName;
118    boolean result = false;
119    do {
120      tailName = tailPath.getName();
121      if (tailName == null || tailName.length() <= 0) {
122        result = true;
123        break;
124      }
125      toSearchName = toSearch.getName();
126      if (toSearchName == null || toSearchName.length() <= 0) {
127        break;
128      }
129      // Move up a parent on each path for next go around.  Path doesn't let us go off the end.
130      tailPath = tailPath.getParent();
131      toSearch = toSearch.getParent();
132    } while(tailName.equals(toSearchName));
133    return result;
134  }
135
136  /**
137   * Delete if exists.
138   * @param fs filesystem object
139   * @param dir directory to delete
140   * @return True if deleted <code>dir</code>
141   * @throws IOException e
142   */
  public static boolean deleteDirectory(final FileSystem fs, final Path dir) throws IOException {
145    return fs.exists(dir) && fs.delete(dir, true);
146  }
147
148  /**
   * Return the number of bytes that large input files should be optimally
   * split into to minimize i/o time.
   *
   * Uses reflection to search for getDefaultBlockSize(Path f); if the method doesn't exist,
   * falls back to using getDefaultBlockSize().
   *
   * @param fs filesystem object
   * @param path path to query the block size for
   * @return the default block size for the path's filesystem
157   * @throws IOException e
158   */
159  public static long getDefaultBlockSize(final FileSystem fs, final Path path) throws IOException {
160    Method m = null;
161    Class<? extends FileSystem> cls = fs.getClass();
162    try {
163      m = cls.getMethod("getDefaultBlockSize", new Class<?>[] { Path.class });
164    } catch (NoSuchMethodException e) {
165      LOG.info("FileSystem doesn't support getDefaultBlockSize");
166    } catch (SecurityException e) {
167      LOG.info("Doesn't have access to getDefaultBlockSize on FileSystems", e);
168      m = null; // could happen on setAccessible()
169    }
170    if (m == null) {
171      return fs.getDefaultBlockSize(path);
172    } else {
173      try {
174        Object ret = m.invoke(fs, path);
175        return ((Long)ret).longValue();
176      } catch (Exception e) {
177        throw new IOException(e);
178      }
179    }
180  }
181
182  /*
183   * Get the default replication.
184   *
185   * use reflection to search for getDefaultReplication(Path f)
186   * if the method doesn't exist, fall back to using getDefaultReplication()
187   *
188   * @param fs filesystem object
189   * @param f path of file
190   * @return default replication for the path's filesystem
191   * @throws IOException e
192   */
193  public static short getDefaultReplication(final FileSystem fs, final Path path)
194      throws IOException {
195    Method m = null;
196    Class<? extends FileSystem> cls = fs.getClass();
197    try {
198      m = cls.getMethod("getDefaultReplication", new Class<?>[] { Path.class });
199    } catch (NoSuchMethodException e) {
200      LOG.info("FileSystem doesn't support getDefaultReplication");
201    } catch (SecurityException e) {
202      LOG.info("Doesn't have access to getDefaultReplication on FileSystems", e);
203      m = null; // could happen on setAccessible()
204    }
205    if (m == null) {
206      return fs.getDefaultReplication(path);
207    } else {
208      try {
209        Object ret = m.invoke(fs, path);
210        return ((Number)ret).shortValue();
211      } catch (Exception e) {
212        throw new IOException(e);
213      }
214    }
215  }
216
217  /**
218   * Returns the default buffer size to use during writes.
219   *
220   * The size of the buffer should probably be a multiple of hardware
221   * page size (4096 on Intel x86), and it determines how much data is
222   * buffered during read and write operations.
223   *
224   * @param fs filesystem object
225   * @return default buffer size to use during writes
226   */
227  public static int getDefaultBufferSize(final FileSystem fs) {
228    return fs.getConf().getInt("io.file.buffer.size", 4096);
229  }
230
231  /**
232   * Create the specified file on the filesystem. By default, this will:
233   * <ol>
234   * <li>apply the umask in the configuration (if it is enabled)</li>
235   * <li>use the fs configured buffer size (or 4096 if not set)</li>
236   * <li>use the default replication</li>
237   * <li>use the default block size</li>
238   * <li>not track progress</li>
239   * </ol>
240   *
241   * @param fs {@link FileSystem} on which to write the file
242   * @param path {@link Path} to the file to write
   * @param perm initial permissions
244   * @param overwrite Whether or not the created file should be overwritten.
245   * @return output stream to the created file
246   * @throws IOException if the file cannot be created
247   */
248  public static FSDataOutputStream create(FileSystem fs, Path path,
249      FsPermission perm, boolean overwrite) throws IOException {
250    if (LOG.isTraceEnabled()) {
251      LOG.trace("Creating file=" + path + " with permission=" + perm + ", overwrite=" + overwrite);
252    }
253    return fs.create(path, perm, overwrite, getDefaultBufferSize(fs),
254        getDefaultReplication(fs, path), getDefaultBlockSize(fs, path), null);
255  }
256
257  /**
258   * Get the file permissions specified in the configuration, if they are
259   * enabled.
260   *
261   * @param fs filesystem that the file will be created on.
262   * @param conf configuration to read for determining if permissions are
263   *          enabled and which to use
   * @param permissionConfKey property key in the configuration to use when
265   *          finding the permission
266   * @return the permission to use when creating a new file on the fs. If
267   *         special permissions are not specified in the configuration, then
268   *         the default permissions on the the fs will be returned.
269   */
  public static FsPermission getFilePermissions(final FileSystem fs,
      final Configuration conf, final String permissionConfKey) {
272    boolean enablePermissions = conf.getBoolean(
273        HConstants.ENABLE_DATA_FILE_UMASK, false);
274
275    if (enablePermissions) {
276      try {
277        FsPermission perm = new FsPermission(FULL_RWX_PERMISSIONS);
278        // make sure that we have a mask, if not, go default.
        String mask = conf.get(permissionConfKey);
280        if (mask == null) {
281          return FsPermission.getFileDefault();
282        }
        // apply the umask
284        FsPermission umask = new FsPermission(mask);
285        return perm.applyUMask(umask);
286      } catch (IllegalArgumentException e) {
        LOG.warn(
            "Incorrect umask specified: "
                + conf.get(permissionConfKey)
                + ", using default file permissions.", e);
291        return FsPermission.getFileDefault();
292      }
293    }
294    return FsPermission.getFileDefault();
295  }
296
297  /**
298   * Verifies root directory path is a valid URI with a scheme
299   *
300   * @param root root directory path
301   * @return Passed <code>root</code> argument.
302   * @throws IOException if not a valid URI with a scheme
303   */
304  public static Path validateRootPath(Path root) throws IOException {
305    try {
306      URI rootURI = new URI(root.toString());
307      String scheme = rootURI.getScheme();
308      if (scheme == null) {
309        throw new IOException("Root directory does not have a scheme");
310      }
311      return root;
312    } catch (URISyntaxException e) {
313      throw new IOException("Root directory path is not a valid " +
314        "URI -- check your " + HConstants.HBASE_DIR + " configuration", e);
315    }
316  }
317
318  /**
   * Checks for the presence of the WAL root path (using the provided conf object) in the given
   * path. If it exists, this method removes it and returns the String representation of the
   * remaining relative path.
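   * <p>
   * A sketch of the behavior, assuming a hypothetical WAL root of
   * hdfs://namenode:8020/hbase/wal configured via {@value #HBASE_WAL_DIR}:
   * <pre>{@code
   * // Returns "WALs/server1" because the path is under the WAL root.
   * CommonFSUtils.removeWALRootPath(
   *     new Path("hdfs://namenode:8020/hbase/wal/WALs/server1"), conf);
   * // Returns the input unchanged because it is not under the WAL root.
   * CommonFSUtils.removeWALRootPath(new Path("/some/other/dir"), conf);
   * }</pre>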
322   * @param path must not be null
323   * @param conf must not be null
324   * @return String representation of the remaining relative path
325   * @throws IOException from underlying filesystem
326   */
327  public static String removeWALRootPath(Path path, final Configuration conf) throws IOException {
328    Path root = getWALRootDir(conf);
329    String pathStr = path.toString();
    // If the path does not start with the WAL root, it is not under it; return it unchanged.
    if (!pathStr.startsWith(root.toString())) {
      return pathStr;
    }
    // Otherwise strip the WAL root prefix, plus the "/" separator that follows it.
    return pathStr.substring(root.toString().length() + 1);
336  }
337
338  /**
339   * Return the 'path' component of a Path.  In Hadoop, Path is a URI.  This
340   * method returns the 'path' component of a Path's URI: e.g. If a Path is
341   * <code>hdfs://example.org:9000/hbase_trunk/TestTable/compaction.dir</code>,
342   * this method returns <code>/hbase_trunk/TestTable/compaction.dir</code>.
   * This method is useful if you want to print out a Path without qualifying the
   * FileSystem instance.
   * @param p Filesystem Path whose 'path' component we are to return.
   * @return the path portion of the passed Path's URI
347   */
348  public static String getPath(Path p) {
349    return p.toUri().getPath();
350  }
351
352  /**
353   * @param c configuration
354   * @return {@link Path} to hbase root directory from
355   *     configuration as a qualified Path.
356   * @throws IOException e
357   */
358  public static Path getRootDir(final Configuration c) throws IOException {
359    Path p = new Path(c.get(HConstants.HBASE_DIR));
360    FileSystem fs = p.getFileSystem(c);
361    return p.makeQualified(fs.getUri(), fs.getWorkingDirectory());
362  }
363
364  public static void setRootDir(final Configuration c, final Path root) throws IOException {
365    c.set(HConstants.HBASE_DIR, root.toString());
366  }
367
368  public static void setFsDefault(final Configuration c, final Path root) throws IOException {
369    c.set("fs.defaultFS", root.toString());    // for hadoop 0.21+
370  }
371
372  public static FileSystem getRootDirFileSystem(final Configuration c) throws IOException {
373    Path p = getRootDir(c);
374    return p.getFileSystem(c);
375  }
376
377  /**
378   * @param c configuration
   * @return {@link Path} to the hbase WAL root directory, i.e. {@value #HBASE_WAL_DIR} from
   *     configuration as a qualified Path. Defaults to the HBase root dir.
381   * @throws IOException e
382   */
383  public static Path getWALRootDir(final Configuration c) throws IOException {
384    Path p = new Path(c.get(HBASE_WAL_DIR, c.get(HConstants.HBASE_DIR)));
385    if (!isValidWALRootDir(p, c)) {
386      return getRootDir(c);
387    }
388    FileSystem fs = p.getFileSystem(c);
389    return p.makeQualified(fs.getUri(), fs.getWorkingDirectory());
390  }
391
392  @VisibleForTesting
393  public static void setWALRootDir(final Configuration c, final Path root) throws IOException {
394    c.set(HBASE_WAL_DIR, root.toString());
395  }
396
397  public static FileSystem getWALFileSystem(final Configuration c) throws IOException {
398    Path p = getWALRootDir(c);
399    FileSystem fs = p.getFileSystem(c);
    // hadoop-core does fs caching, so need to propagate this if set
401    String enforceStreamCapability = c.get(UNSAFE_STREAM_CAPABILITY_ENFORCE);
402    if (enforceStreamCapability != null) {
403      fs.getConf().set(UNSAFE_STREAM_CAPABILITY_ENFORCE, enforceStreamCapability);
404    }
405    return fs;
406  }
407
408  private static boolean isValidWALRootDir(Path walDir, final Configuration c) throws IOException {
409    Path rootDir = getRootDir(c);
410    FileSystem fs = walDir.getFileSystem(c);
411    Path qualifiedWalDir = walDir.makeQualified(fs.getUri(), fs.getWorkingDirectory());
412    if (!qualifiedWalDir.equals(rootDir)) {
413      if (qualifiedWalDir.toString().startsWith(rootDir.toString() + "/")) {
414        throw new IllegalStateException("Illegal WAL directory specified. " +
415            "WAL directories are not permitted to be under the root directory if set.");
416      }
417    }
418    return true;
419  }
420
421  /**
422   * Returns the WAL region directory based on the given table name and region name
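   * <p>
   * A sketch of the resulting layout; the WAL root, table and encoded region name below are
   * hypothetical and assume the default 'data' base namespace directory:
   * <pre>{@code
   * // With hbase.wal.dir=hdfs://namenode:8020/hbase/wal this resolves to roughly
   * // hdfs://namenode:8020/hbase/wal/data/default/mytable/0a1b2c3d4e5f66778899aabbccddeeff
   * Path regionDir = CommonFSUtils.getWALRegionDir(conf, TableName.valueOf("mytable"),
   *     "0a1b2c3d4e5f66778899aabbccddeeff"); // hypothetical encoded region name
   * }</pre>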
423   * @param conf configuration to determine WALRootDir
424   * @param tableName Table that the region is under
425   * @param encodedRegionName Region name used for creating the final region directory
426   * @return the region directory used to store WALs under the WALRootDir
427   * @throws IOException if there is an exception determining the WALRootDir
428   */
429  public static Path getWALRegionDir(final Configuration conf, final TableName tableName,
430      final String encodedRegionName) throws IOException {
431    return new Path(getWALTableDir(conf, tableName), encodedRegionName);
432  }
433
434  /**
435   * Returns the Table directory under the WALRootDir for the specified table name
436   * @param conf configuration used to get the WALRootDir
437   * @param tableName Table to get the directory for
438   * @return a path to the WAL table directory for the specified table
439   * @throws IOException if there is an exception determining the WALRootDir
440   */
441  public static Path getWALTableDir(final Configuration conf, final TableName tableName)
442      throws IOException {
443    Path baseDir = new Path(getWALRootDir(conf), HConstants.BASE_NAMESPACE_DIR);
444    return new Path(new Path(baseDir, tableName.getNamespaceAsString()),
445      tableName.getQualifierAsString());
446  }
447
448  /**
449   * For backward compatibility with HBASE-20734, where we store recovered edits in a wrong
450   * directory without BASE_NAMESPACE_DIR. See HBASE-22617 for more details.
451   * @deprecated For compatibility, will be removed in 4.0.0.
452   */
453  @Deprecated
454  public static Path getWrongWALRegionDir(final Configuration conf, final TableName tableName,
455      final String encodedRegionName) throws IOException {
456    Path wrongTableDir = new Path(new Path(getWALRootDir(conf), tableName.getNamespaceAsString()),
457      tableName.getQualifierAsString());
458    return new Path(wrongTableDir, encodedRegionName);
459  }
460
461  /**
462   * Returns the {@link org.apache.hadoop.fs.Path} object representing the table directory under
463   * path rootdir
464   *
465   * @param rootdir qualified path of HBase root directory
466   * @param tableName name of table
467   * @return {@link org.apache.hadoop.fs.Path} for table
468   */
469  public static Path getTableDir(Path rootdir, final TableName tableName) {
470    return new Path(getNamespaceDir(rootdir, tableName.getNamespaceAsString()),
471        tableName.getQualifierAsString());
472  }
473
474  /**
   * Returns the {@link org.apache.hadoop.hbase.TableName} for the table whose directory is the
   * given path under the HBase root directory.
   *
   * @param tablePath path of the table directory
   * @return {@link org.apache.hadoop.hbase.TableName} for the table
481   */
482  public static TableName getTableName(Path tablePath) {
483    return TableName.valueOf(tablePath.getParent().getName(), tablePath.getName());
484  }
485
486  /**
487   * Returns the {@link org.apache.hadoop.fs.Path} object representing
488   * the namespace directory under path rootdir
489   *
490   * @param rootdir qualified path of HBase root directory
491   * @param namespace namespace name
   * @return {@link org.apache.hadoop.fs.Path} for the namespace
493   */
494  public static Path getNamespaceDir(Path rootdir, final String namespace) {
495    return new Path(rootdir, new Path(HConstants.BASE_NAMESPACE_DIR,
496        new Path(namespace)));
497  }
498
499  // this mapping means that under a federated FileSystem implementation, we'll
500  // only log the first failure from any of the underlying FileSystems at WARN and all others
501  // will be at DEBUG.
502  private static final Map<FileSystem, Boolean> warningMap =
503      new ConcurrentHashMap<FileSystem, Boolean>();
504
505  /**
506   * Sets storage policy for given path.
507   * If the passed path is a directory, we'll set the storage policy for all files
508   * created in the future in said directory. Note that this change in storage
509   * policy takes place at the FileSystem level; it will persist beyond this RS's lifecycle.
510   * If we're running on a version of FileSystem that doesn't support the given storage policy
511   * (or storage policies at all), then we'll issue a log message and continue.
512   *
513   * See http://hadoop.apache.org/docs/r2.6.0/hadoop-project-dist/hadoop-hdfs/ArchivalStorage.html
514   *
   * @param fs We only do anything if it implements a setStoragePolicy method
516   * @param path the Path whose storage policy is to be set
517   * @param storagePolicy Policy to set on <code>path</code>; see hadoop 2.6+
518   *   org.apache.hadoop.hdfs.protocol.HdfsConstants for possible list e.g
519   *   'COLD', 'WARM', 'HOT', 'ONE_SSD', 'ALL_SSD', 'LAZY_PERSIST'.
520   */
521  public static void setStoragePolicy(final FileSystem fs, final Path path,
522      final String storagePolicy) {
523    try {
524      setStoragePolicy(fs, path, storagePolicy, false);
525    } catch (IOException e) {
      // Should never arrive here; this variant deliberately swallows the exception.
      LOG.warn("Unexpected exception while setting storage policy; not rethrowing", e);
528    }
529  }
530
531  static void setStoragePolicy(final FileSystem fs, final Path path, final String storagePolicy,
532      boolean throwException) throws IOException {
533    if (storagePolicy == null) {
534      if (LOG.isTraceEnabled()) {
535        LOG.trace("We were passed a null storagePolicy, exiting early.");
536      }
537      return;
538    }
539    String trimmedStoragePolicy = storagePolicy.trim();
540    if (trimmedStoragePolicy.isEmpty()) {
541      if (LOG.isTraceEnabled()) {
542        LOG.trace("We were passed an empty storagePolicy, exiting early.");
543      }
544      return;
545    } else {
546      trimmedStoragePolicy = trimmedStoragePolicy.toUpperCase(Locale.ROOT);
547    }
548    if (trimmedStoragePolicy.equals(HConstants.DEFER_TO_HDFS_STORAGE_POLICY)) {
549      if (LOG.isTraceEnabled()) {
550        LOG.trace("We were passed the defer-to-hdfs policy {}, exiting early.",
551          trimmedStoragePolicy);
552      }
553      return;
554    }
555    try {
556      invokeSetStoragePolicy(fs, path, trimmedStoragePolicy);
557    } catch (IOException e) {
558      if (LOG.isTraceEnabled()) {
559        LOG.trace("Failed to invoke set storage policy API on FS", e);
560      }
561      if (throwException) {
562        throw e;
563      }
564    }
565  }
566
567  /*
568   * All args have been checked and are good. Run the setStoragePolicy invocation.
569   */
570  private static void invokeSetStoragePolicy(final FileSystem fs, final Path path,
571      final String storagePolicy) throws IOException {
572    Method m = null;
573    Exception toThrow = null;
574    try {
575      m = fs.getClass().getDeclaredMethod("setStoragePolicy",
576        new Class<?>[] { Path.class, String.class });
577      m.setAccessible(true);
578    } catch (NoSuchMethodException e) {
579      toThrow = e;
580      final String msg = "FileSystem doesn't support setStoragePolicy; HDFS-6584, HDFS-9345 " +
581          "not available. This is normal and expected on earlier Hadoop versions.";
582      if (!warningMap.containsKey(fs)) {
583        warningMap.put(fs, true);
584        LOG.warn(msg, e);
585      } else if (LOG.isDebugEnabled()) {
586        LOG.debug(msg, e);
587      }
588      m = null;
589    } catch (SecurityException e) {
590      toThrow = e;
591      final String msg = "No access to setStoragePolicy on FileSystem from the SecurityManager; " +
592          "HDFS-6584, HDFS-9345 not available. This is unusual and probably warrants an email " +
593          "to the user@hbase mailing list. Please be sure to include a link to your configs, and " +
594          "logs that include this message and period of time before it. Logs around service " +
595          "start up will probably be useful as well.";
596      if (!warningMap.containsKey(fs)) {
597        warningMap.put(fs, true);
598        LOG.warn(msg, e);
599      } else if (LOG.isDebugEnabled()) {
600        LOG.debug(msg, e);
601      }
602      m = null; // could happen on setAccessible() or getDeclaredMethod()
603    }
604    if (m != null) {
605      try {
606        m.invoke(fs, path, storagePolicy);
607        if (LOG.isDebugEnabled()) {
608          LOG.debug("Set storagePolicy=" + storagePolicy + " for path=" + path);
609        }
610      } catch (Exception e) {
611        toThrow = e;
612        // This swallows FNFE, should we be throwing it? seems more likely to indicate dev
613        // misuse than a runtime problem with HDFS.
614        if (!warningMap.containsKey(fs)) {
615          warningMap.put(fs, true);
616          LOG.warn("Unable to set storagePolicy=" + storagePolicy + " for path=" + path + ". " +
617              "DEBUG log level might have more details.", e);
618        } else if (LOG.isDebugEnabled()) {
619          LOG.debug("Unable to set storagePolicy=" + storagePolicy + " for path=" + path, e);
620        }
621        // check for lack of HDFS-7228
622        if (e instanceof InvocationTargetException) {
623          final Throwable exception = e.getCause();
624          if (exception instanceof RemoteException &&
625              HadoopIllegalArgumentException.class.getName().equals(
626                ((RemoteException)exception).getClassName())) {
627            if (LOG.isDebugEnabled()) {
628              LOG.debug("Given storage policy, '" +storagePolicy +"', was rejected and probably " +
629                "isn't a valid policy for the version of Hadoop you're running. I.e. if you're " +
630                "trying to use SSD related policies then you're likely missing HDFS-7228. For " +
631                "more information see the 'ArchivalStorage' docs for your Hadoop release.");
632            }
633          // Hadoop 2.8+, 3.0-a1+ added FileSystem.setStoragePolicy with a default implementation
634          // that throws UnsupportedOperationException
635          } else if (exception instanceof UnsupportedOperationException) {
636            if (LOG.isDebugEnabled()) {
637              LOG.debug("The underlying FileSystem implementation doesn't support " +
638                  "setStoragePolicy. This is probably intentional on their part, since HDFS-9345 " +
639                  "appears to be present in your version of Hadoop. For more information check " +
640                  "the Hadoop documentation on 'ArchivalStorage', the Hadoop FileSystem " +
641                  "specification docs from HADOOP-11981, and/or related documentation from the " +
642                  "provider of the underlying FileSystem (its name should appear in the " +
643                  "stacktrace that accompanies this message). Note in particular that Hadoop's " +
644                  "local filesystem implementation doesn't support storage policies.", exception);
645            }
646          }
647        }
648      }
649    }
650    if (toThrow != null) {
651      throw new IOException(toThrow);
652    }
653  }
654
655  /**
656   * @param conf must not be null
   * @return True if the configured default filesystem's scheme is 'hdfs'.
658   * @throws IOException from underlying FileSystem
659   */
660  public static boolean isHDFS(final Configuration conf) throws IOException {
661    FileSystem fs = FileSystem.get(conf);
662    String scheme = fs.getUri().getScheme();
663    return scheme.equalsIgnoreCase("hdfs");
664  }
665
666  /**
   * Checks if the given path contains a 'recovered.edits' directory component.
   * @param path must not be null
   * @return True if the path is under a 'recovered.edits' directory
670   */
671  public static boolean isRecoveredEdits(Path path) {
672    return path.toString().contains(HConstants.RECOVERED_EDITS_DIR);
673  }
674
675  /**
676   * @param conf must not be null
   * @return the filesystem of the hbase rootdir.
678   * @throws IOException from underlying FileSystem
679   */
  public static FileSystem getCurrentFileSystem(Configuration conf) throws IOException {
682    return getRootDir(conf).getFileSystem(conf);
683  }
684
685  /**
   * Calls fs.listStatus() and treats FileNotFoundException as non-fatal.
   * This accommodates differences between hadoop versions, where hadoop 1
   * does not throw a FileNotFoundException and instead returns an empty FileStatus[],
   * while Hadoop 2 will throw a FileNotFoundException.
690   *
691   * Where possible, prefer FSUtils#listStatusWithStatusFilter(FileSystem,
692   * Path, FileStatusFilter) instead.
693   *
694   * @param fs file system
695   * @param dir directory
696   * @param filter path filter
697   * @return null if dir is empty or doesn't exist, otherwise FileStatus array
698   */
699  public static FileStatus [] listStatus(final FileSystem fs,
700      final Path dir, final PathFilter filter) throws IOException {
701    FileStatus [] status = null;
702    try {
703      status = filter == null ? fs.listStatus(dir) : fs.listStatus(dir, filter);
704    } catch (FileNotFoundException fnfe) {
705      // if directory doesn't exist, return null
706      if (LOG.isTraceEnabled()) {
707        LOG.trace(dir + " doesn't exist");
708      }
709    }
710    if (status == null || status.length < 1) {
711      return null;
712    }
713    return status;
714  }
715
716  /**
   * Calls fs.listStatus() and treats FileNotFoundException as non-fatal.
   * This accommodates differences between hadoop versions.
719   *
720   * @param fs file system
721   * @param dir directory
722   * @return null if dir is empty or doesn't exist, otherwise FileStatus array
723   */
724  public static FileStatus[] listStatus(final FileSystem fs, final Path dir) throws IOException {
725    return listStatus(fs, dir, null);
726  }
727
728  /**
   * Calls fs.listFiles() to get FileStatus and BlockLocations together, reducing RPC calls
730   *
731   * @param fs file system
732   * @param dir directory
733   * @return LocatedFileStatus list
734   */
735  public static List<LocatedFileStatus> listLocatedStatus(final FileSystem fs,
736      final Path dir) throws IOException {
737    List<LocatedFileStatus> status = null;
738    try {
739      RemoteIterator<LocatedFileStatus> locatedFileStatusRemoteIterator = fs
740          .listFiles(dir, false);
741      while (locatedFileStatusRemoteIterator.hasNext()) {
742        if (status == null) {
743          status = Lists.newArrayList();
744        }
745        status.add(locatedFileStatusRemoteIterator.next());
746      }
747    } catch (FileNotFoundException fnfe) {
748      // if directory doesn't exist, return null
749      if (LOG.isTraceEnabled()) {
750        LOG.trace(dir + " doesn't exist");
751      }
752    }
753    return status;
754  }
755
756  /**
   * Calls fs.delete() and returns the value returned by fs.delete().
   *
   * @param fs must not be null
   * @param path must not be null
   * @param recursive delete tree rooted at path
   * @return the value returned by fs.delete()
763   * @throws IOException from underlying FileSystem
764   */
765  public static boolean delete(final FileSystem fs, final Path path, final boolean recursive)
766      throws IOException {
767    return fs.delete(path, recursive);
768  }
769
770  /**
771   * Calls fs.exists(). Checks if the specified path exists
772   *
773   * @param fs must not be null
774   * @param path must not be null
775   * @return the value returned by fs.exists()
776   * @throws IOException from underlying FileSystem
777   */
778  public static boolean isExists(final FileSystem fs, final Path path) throws IOException {
779    return fs.exists(path);
780  }
781
782  /**
783   * Log the current state of the filesystem from a certain root directory
784   * @param fs filesystem to investigate
785   * @param root root file/directory to start logging from
786   * @param LOG log to output information
787   * @throws IOException if an unexpected exception occurs
788   */
789  public static void logFileSystemState(final FileSystem fs, final Path root, Logger LOG)
790      throws IOException {
791    LOG.debug("File system contents for path " + root);
792    logFSTree(LOG, fs, root, "|-");
793  }
794
795  /**
796   * Recursive helper to log the state of the FS
797   *
798   * @see #logFileSystemState(FileSystem, Path, Logger)
799   */
800  private static void logFSTree(Logger LOG, final FileSystem fs, final Path root, String prefix)
801      throws IOException {
802    FileStatus[] files = listStatus(fs, root, null);
803    if (files == null) {
804      return;
805    }
806
807    for (FileStatus file : files) {
808      if (file.isDirectory()) {
809        LOG.debug(prefix + file.getPath().getName() + "/");
810        logFSTree(LOG, fs, file.getPath(), prefix + "---");
811      } else {
812        LOG.debug(prefix + file.getPath().getName());
813      }
814    }
815  }
816
817  public static boolean renameAndSetModifyTime(final FileSystem fs, final Path src, final Path dest)
818      throws IOException {
819    // set the modify time for TimeToLive Cleaner
820    fs.setTimes(src, EnvironmentEdgeManager.currentTime(), -1);
821    return fs.rename(src, dest);
822  }
823
824  /**
825   * Do our short circuit read setup.
826   * Checks buffer size to use and whether to do checksumming in hbase or hdfs.
827   * @param conf must not be null
828   */
829  public static void setupShortCircuitRead(final Configuration conf) {
830    // Check that the user has not set the "dfs.client.read.shortcircuit.skip.checksum" property.
831    boolean shortCircuitSkipChecksum =
832      conf.getBoolean("dfs.client.read.shortcircuit.skip.checksum", false);
833    boolean useHBaseChecksum = conf.getBoolean(HConstants.HBASE_CHECKSUM_VERIFICATION, true);
834    if (shortCircuitSkipChecksum) {
835      LOG.warn("Configuration \"dfs.client.read.shortcircuit.skip.checksum\" should not " +
836        "be set to true." + (useHBaseChecksum ? " HBase checksum doesn't require " +
837        "it, see https://issues.apache.org/jira/browse/HBASE-6868." : ""));
838      assert !shortCircuitSkipChecksum; //this will fail if assertions are on
839    }
840    checkShortCircuitReadBufferSize(conf);
841  }
842
843  /**
844   * Check if short circuit read buffer size is set and if not, set it to hbase value.
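   * <p>
   * A configuration sketch (the buffer size shown is illustrative, not a recommendation):
   * <pre>{@code
   * Configuration conf = HBaseConfiguration.create();
   * // If dfs.client.read.shortcircuit.buffer.size is unset, checkShortCircuitReadBufferSize
   * // seeds it from hbase.dfs.client.read.shortcircuit.buffer.size, falling back to twice
   * // HConstants.DEFAULT_BLOCKSIZE.
   * conf.setInt("hbase.dfs.client.read.shortcircuit.buffer.size", 131072);
   * CommonFSUtils.checkShortCircuitReadBufferSize(conf);
   * }</pre>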
845   * @param conf must not be null
846   */
847  public static void checkShortCircuitReadBufferSize(final Configuration conf) {
848    final int defaultSize = HConstants.DEFAULT_BLOCKSIZE * 2;
849    final int notSet = -1;
850    // DFSConfigKeys.DFS_CLIENT_READ_SHORTCIRCUIT_BUFFER_SIZE_KEY is only defined in h2
851    final String dfsKey = "dfs.client.read.shortcircuit.buffer.size";
852    int size = conf.getInt(dfsKey, notSet);
853    // If a size is set, return -- we will use it.
854    if (size != notSet) {
855      return;
856    }
857    // But short circuit buffer size is normally not set.  Put in place the hbase wanted size.
858    int hbaseSize = conf.getInt("hbase." + dfsKey, defaultSize);
859    conf.setIfUnset(dfsKey, Integer.toString(hbaseSize));
860  }
861
862  private static class DfsBuilderUtility {
863    static Class<?> dfsClass = null;
864    static Method createMethod;
865    static Method overwriteMethod;
866    static Method bufferSizeMethod;
867    static Method blockSizeMethod;
868    static Method recursiveMethod;
869    static Method replicateMethod;
870    static Method replicationMethod;
871    static Method buildMethod;
872    static boolean allMethodsPresent = false;
873
874    static {
875      String dfsName = "org.apache.hadoop.hdfs.DistributedFileSystem";
876      String builderName = dfsName + "$HdfsDataOutputStreamBuilder";
877      Class<?> builderClass = null;
878
879      try {
880        dfsClass = Class.forName(dfsName);
881      } catch (ClassNotFoundException e) {
882        LOG.debug("{} not available, will not use builder API for file creation.", dfsName);
883      }
884      try {
885        builderClass = Class.forName(builderName);
886      } catch (ClassNotFoundException e) {
887        LOG.debug("{} not available, will not use builder API for file creation.", builderName);
888      }
889
890      if (dfsClass != null && builderClass != null) {
891        try {
892          createMethod = dfsClass.getMethod("createFile", Path.class);
893          overwriteMethod = builderClass.getMethod("overwrite", boolean.class);
894          bufferSizeMethod = builderClass.getMethod("bufferSize", int.class);
895          blockSizeMethod = builderClass.getMethod("blockSize", long.class);
896          recursiveMethod = builderClass.getMethod("recursive");
897          replicateMethod = builderClass.getMethod("replicate");
898          replicationMethod = builderClass.getMethod("replication", short.class);
899          buildMethod = builderClass.getMethod("build");
900
901          allMethodsPresent = true;
902          LOG.debug("Using builder API via reflection for DFS file creation.");
903        } catch (NoSuchMethodException e) {
904          LOG.debug("Could not find method on builder; will use old DFS API for file creation {}",
905              e.getMessage());
906        }
907      }
908    }
909
910    /**
911     * Attempt to use builder API via reflection to create a file with the given parameters and
912     * replication enabled.
913     */
914    static FSDataOutputStream createHelper(FileSystem fs, Path path, boolean overwritable,
915        int bufferSize, short replication, long blockSize, boolean isRecursive) throws IOException {
916      if (allMethodsPresent && dfsClass.isInstance(fs)) {
917        try {
918          Object builder;
919
920          builder = createMethod.invoke(fs, path);
921          builder = overwriteMethod.invoke(builder, overwritable);
922          builder = bufferSizeMethod.invoke(builder, bufferSize);
923          builder = blockSizeMethod.invoke(builder, blockSize);
924          if (isRecursive) {
925            builder = recursiveMethod.invoke(builder);
926          }
927          builder = replicateMethod.invoke(builder);
928          builder = replicationMethod.invoke(builder, replication);
929          return (FSDataOutputStream) buildMethod.invoke(builder);
930        } catch (IllegalAccessException | InvocationTargetException e) {
931          // Should have caught this failure during initialization, so log full trace here
932          LOG.warn("Couldn't use reflection with builder API", e);
933        }
934      }
935
936      if (isRecursive) {
937        return fs.create(path, overwritable, bufferSize, replication, blockSize, null);
938      }
939      return fs.createNonRecursive(path, overwritable, bufferSize, replication, blockSize, null);
940    }
941
942    /**
943     * Attempt to use builder API via reflection to create a file with the given parameters and
944     * replication enabled.
945     */
946    static FSDataOutputStream createHelper(FileSystem fs, Path path, boolean overwritable)
947        throws IOException {
948      if (allMethodsPresent && dfsClass.isInstance(fs)) {
949        try {
950          Object builder;
951
952          builder = createMethod.invoke(fs, path);
953          builder = overwriteMethod.invoke(builder, overwritable);
954          builder = replicateMethod.invoke(builder);
955          return (FSDataOutputStream) buildMethod.invoke(builder);
956        } catch (IllegalAccessException | InvocationTargetException e) {
957          // Should have caught this failure during initialization, so log full trace here
958          LOG.warn("Couldn't use reflection with builder API", e);
959        }
960      }
961
962      return fs.create(path, overwritable);
963    }
964  }
965
966  /**
967   * Attempt to use builder API via reflection to create a file with the given parameters and
968   * replication enabled.
969   * <p>
970   * Will not attempt to enable replication when passed an HFileSystem.
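   * <p>
   * A usage sketch (the WAL file name below is hypothetical):
   * <pre>{@code
   * FileSystem walFs = CommonFSUtils.getWALFileSystem(conf);
   * Path walPath = new Path(CommonFSUtils.getWALRootDir(conf), "WALs/server1/wal.0000001");
   * FSDataOutputStream out = CommonFSUtils.createForWal(walFs, walPath, false);
   * }</pre>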
971   */
972  public static FSDataOutputStream createForWal(FileSystem fs, Path path, boolean overwritable)
973      throws IOException {
974    return DfsBuilderUtility.createHelper(fs, path, overwritable);
975  }
976
977  /**
978   * Attempt to use builder API via reflection to create a file with the given parameters and
979   * replication enabled.
980   * <p>
981   * Will not attempt to enable replication when passed an HFileSystem.
982   */
983  public static FSDataOutputStream createForWal(FileSystem fs, Path path, boolean overwritable,
984      int bufferSize, short replication, long blockSize, boolean isRecursive) throws IOException {
985    return DfsBuilderUtility.createHelper(fs, path, overwritable, bufferSize, replication,
986        blockSize, isRecursive);
987  }
988
989  // Holder singleton idiom. JVM spec ensures this will be run at most once per Classloader, and
990  // not until we attempt to reference it.
991  private static class StreamCapabilities {
992    public static final boolean PRESENT;
993    public static final Class<?> CLASS;
994    public static final Method METHOD;
995    static {
996      boolean tmp = false;
997      Class<?> clazz = null;
998      Method method = null;
999      try {
1000        clazz = Class.forName("org.apache.hadoop.fs.StreamCapabilities");
1001        method = clazz.getMethod("hasCapability", String.class);
1002        tmp = true;
1003      } catch(ClassNotFoundException|NoSuchMethodException|SecurityException exception) {
1004        LOG.warn("Your Hadoop installation does not include the StreamCapabilities class from " +
1005                 "HDFS-11644, so we will skip checking if any FSDataOutputStreams actually " +
1006                 "support hflush/hsync. If you are running on top of HDFS this probably just " +
1007                 "means you have an older version and this can be ignored. If you are running on " +
1008                 "top of an alternate FileSystem implementation you should manually verify that " +
1009                 "hflush and hsync are implemented; otherwise you risk data loss and hard to " +
1010                 "diagnose errors when our assumptions are violated.");
1011        LOG.debug("The first request to check for StreamCapabilities came from this stacktrace.",
1012            exception);
1013      } finally {
1014        PRESENT = tmp;
1015        CLASS = clazz;
1016        METHOD = method;
1017      }
1018    }
1019  }
1020
1021  /**
1022   * If our FileSystem version includes the StreamCapabilities class, check if the given stream has
1023   * a particular capability.
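   * <p>
   * A usage sketch; the capability names follow Hadoop's StreamCapabilities constants, e.g.
   * "hflush" and "hsync":
   * <pre>{@code
   * FSDataOutputStream out = CommonFSUtils.createForWal(walFs, walPath, false);
   * if (!CommonFSUtils.hasCapability(out, "hflush")) {
   *   // Caller decides how to surface this; StreamLacksCapabilityException is one option.
   *   throw new CommonFSUtils.StreamLacksCapabilityException("hflush");
   * }
   * }</pre>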
   * @param stream capabilities are per-stream instance, so check this one specifically; must
   *          not be null
1026   * @param capability what to look for, per Hadoop Common's FileSystem docs
   * @return true if there are no StreamCapabilities; false if there are but this stream doesn't
   *         implement it; otherwise the result of asking the stream.
1029   * @throws NullPointerException if {@code stream} is {@code null}
1030   */
1031  public static boolean hasCapability(FSDataOutputStream stream, String capability) {
1032    // be consistent whether or not StreamCapabilities is present
1033    Objects.requireNonNull(stream, "stream cannot be null");
1034    // If o.a.h.fs.StreamCapabilities doesn't exist, assume everyone does everything
1035    // otherwise old versions of Hadoop will break.
1036    boolean result = true;
1037    if (StreamCapabilities.PRESENT) {
1038      // if StreamCapabilities is present, but the stream doesn't implement it
1039      // or we run into a problem invoking the method,
1040      // we treat that as equivalent to not declaring anything
1041      result = false;
1042      if (StreamCapabilities.CLASS.isAssignableFrom(stream.getClass())) {
1043        try {
1044          result = ((Boolean)StreamCapabilities.METHOD.invoke(stream, capability)).booleanValue();
1045        } catch (IllegalAccessException|IllegalArgumentException|InvocationTargetException
1046            exception) {
1047          LOG.warn("Your Hadoop installation's StreamCapabilities implementation doesn't match " +
1048              "our understanding of how it's supposed to work. Please file a JIRA and include " +
1049              "the following stack trace. In the mean time we're interpreting this behavior " +
1050              "difference as a lack of capability support, which will probably cause a failure.",
1051              exception);
1052        }
1053      }
1054    }
1055    return result;
1056  }
1057
1058  /**
1059   * Helper exception for those cases where the place where we need to check a stream capability
1060   * is not where we have the needed context to explain the impact and mitigation for a lack.
1061   */
1062  public static class StreamLacksCapabilityException extends Exception {
1063    public StreamLacksCapabilityException(String message, Throwable cause) {
1064      super(message, cause);
1065    }
1066    public StreamLacksCapabilityException(String message) {
1067      super(message);
1068    }
1069  }
1070}