1   /**
2    * Copyright 2010 The Apache Software Foundation
3    *
4    * Licensed to the Apache Software Foundation (ASF) under one
5    * or more contributor license agreements.  See the NOTICE file
6    * distributed with this work for additional information
7    * regarding copyright ownership.  The ASF licenses this file
8    * to you under the Apache License, Version 2.0 (the
9    * "License"); you may not use this file except in compliance
10   * with the License.  You may obtain a copy of the License at
11   *
12   *     http://www.apache.org/licenses/LICENSE-2.0
13   *
14   * Unless required by applicable law or agreed to in writing, software
15   * distributed under the License is distributed on an "AS IS" BASIS,
16   * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
17   * See the License for the specific language governing permissions and
18   * limitations under the License.
19   */
20  package org.apache.hadoop.hbase.util;
21  
22  import java.io.DataInputStream;
23  import java.io.EOFException;
24  import java.io.FileNotFoundException;
25  import java.io.IOException;
26  import java.lang.reflect.Method;
27  import java.net.URI;
28  import java.net.URISyntaxException;
29  import java.util.ArrayList;
30  import java.util.HashMap;
31  import java.util.List;
32  import java.util.Map;
33  import java.util.regex.Pattern;
34  
35  import org.apache.commons.logging.Log;
36  import org.apache.commons.logging.LogFactory;
37  import org.apache.hadoop.conf.Configuration;
38  import org.apache.hadoop.fs.BlockLocation;
39  import org.apache.hadoop.fs.FSDataInputStream;
40  import org.apache.hadoop.fs.FSDataOutputStream;
41  import org.apache.hadoop.fs.FileStatus;
42  import org.apache.hadoop.fs.FileSystem;
43  import org.apache.hadoop.fs.Path;
44  import org.apache.hadoop.fs.PathFilter;
45  import org.apache.hadoop.fs.permission.FsAction;
46  import org.apache.hadoop.fs.permission.FsPermission;
47  import org.apache.hadoop.hbase.HBaseFileSystem;
48  import org.apache.hadoop.hbase.HColumnDescriptor;
49  import org.apache.hadoop.hbase.HConstants;
50  import org.apache.hadoop.hbase.HDFSBlocksDistribution;
51  import org.apache.hadoop.hbase.HRegionInfo;
52  import org.apache.hadoop.hbase.RemoteExceptionHandler;
53  import org.apache.hadoop.hbase.master.HMaster;
54  import org.apache.hadoop.hbase.regionserver.HRegion;
55  import org.apache.hadoop.hbase.security.User;
56  import org.apache.hadoop.hdfs.DistributedFileSystem;
57  import org.apache.hadoop.io.SequenceFile;
58  import org.apache.hadoop.security.AccessControlException;
59  import org.apache.hadoop.util.ReflectionUtils;
60  import org.apache.hadoop.util.StringUtils;
61  
62  /**
63   * Utility methods for interacting with the underlying file system.
64   */
65  public abstract class FSUtils {
66    private static final Log LOG = LogFactory.getLog(FSUtils.class);
67  
68    /** Full access permissions (starting point for a umask) */
69    private static final String FULL_RWX_PERMISSIONS = "777";
70  
71    protected FSUtils() {
72      super();
73    }
74  
75    public static FSUtils getInstance(FileSystem fs, Configuration conf) {
76      String scheme = fs.getUri().getScheme();
77      if (scheme == null) {
78        LOG.warn("Could not find scheme for uri " +
79            fs.getUri() + ", default to hdfs");
80        scheme = "hdfs";
81      }
82      Class<?> fsUtilsClass = conf.getClass("hbase.fsutil." +
83          scheme + ".impl", FSHDFSUtils.class); // Default to HDFS impl
84      FSUtils fsUtils = (FSUtils)ReflectionUtils.newInstance(fsUtilsClass, conf);
85      return fsUtils;
86    }
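
  // Usage sketch (illustrative, not part of the original class): resolve the
  // scheme-specific FSUtils implementation for the filesystem backing the
  // HBase root directory.  For an hdfs:// rootdir this yields FSHDFSUtils
  // unless "hbase.fsutil.hdfs.impl" is overridden in the configuration.
  //
  //   Configuration conf = HBaseConfiguration.create();
  //   FileSystem fs = FSUtils.getRootDir(conf).getFileSystem(conf);
  //   FSUtils fsUtils = FSUtils.getInstance(fs, conf);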
87  
88    /**
89    * Delete the specified directory if it exists.
90     * @param fs filesystem object
91     * @param dir directory to delete
92     * @return True if deleted <code>dir</code>
93     * @throws IOException e
94     */
95    public static boolean deleteDirectory(final FileSystem fs, final Path dir)
96    throws IOException {
97      return fs.exists(dir) && fs.delete(dir, true);
98    }
99  
100   /**
101    * Check if directory exists.  If it does not, create it.
102    * @param fs filesystem object
103    * @param dir path to check
104    * @return the passed <code>dir</code>
105    * @throws IOException e
106    */
107   public Path checkdir(final FileSystem fs, final Path dir) throws IOException {
108     if (!fs.exists(dir)) {
109       HBaseFileSystem.makeDirOnFileSystem(fs, dir);
110     }
111     return dir;
112   }
113 
114   /**
115    * Create the specified file on the filesystem. By default, this will:
116    * <ol>
117    * <li>overwrite the file if it exists</li>
118    * <li>apply the umask in the configuration (if it is enabled)</li>
119   * <li>use the fs configured buffer size (or 4096 if
120   * not set)</li>
121    * <li>use the default replication</li>
122    * <li>use the default block size</li>
123    * <li>not track progress</li>
124    * </ol>
125    *
126    * @param fs {@link FileSystem} on which to write the file
127    * @param path {@link Path} to the file to write
128    * @return output stream to the created file
129    * @throws IOException if the file cannot be created
130    */
131   public static FSDataOutputStream create(FileSystem fs, Path path,
132       FsPermission perm) throws IOException {
133     return create(fs, path, perm, true);
134   }
135 
136   /**
137    * Create the specified file on the filesystem. By default, this will:
138    * <ol>
139    * <li>apply the umask in the configuration (if it is enabled)</li>
140   * <li>use the fs configured buffer size (or 4096 if
141   * not set)</li>
142    * <li>use the default replication</li>
143    * <li>use the default block size</li>
144    * <li>not track progress</li>
145    * </ol>
146    *
147    * @param fs {@link FileSystem} on which to write the file
148    * @param path {@link Path} to the file to write
149    * @param perm permissions to set on the created file
150    * @param overwrite Whether an existing file should be overwritten.
151    * @return output stream to the created file
152    * @throws IOException if the file cannot be created
153    */
154   public static FSDataOutputStream create(FileSystem fs, Path path, FsPermission perm,
155       boolean overwrite) throws IOException {
156     LOG.debug("Creating file=" + path + " with permission=" + perm);
157     return HBaseFileSystem.createPathWithPermsOnFileSystem(fs, path, perm, overwrite);
158   }
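
  // Usage sketch (illustrative): create a file with umask-derived permissions
  // instead of the filesystem defaults; the "hbase.data.umask" key shown below
  // is an assumption for this example.
  //
  //   FsPermission perm = FSUtils.getFilePermissions(fs, conf, "hbase.data.umask");
  //   FSDataOutputStream out = FSUtils.create(fs, path, perm, true);
  //   try { out.writeUTF("..."); } finally { out.close(); }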
159 
160   /**
161    * Get the file permissions specified in the configuration, if they are
162    * enabled.
163    *
164    * @param fs filesystem that the file will be created on.
165    * @param conf configuration to read for determining if permissions are
166    *          enabled and which to use
167    * @param permssionConfKey property key in the configuration to use when
168    *          finding the permission
169    * @return the permission to use when creating a new file on the fs. If
170    *         special permissions are not specified in the configuration, then
171    *         the default permissions on the fs will be returned.
172    */
173   public static FsPermission getFilePermissions(final FileSystem fs,
174       final Configuration conf, final String permssionConfKey) {
175     boolean enablePermissions = conf.getBoolean(
176         HConstants.ENABLE_DATA_FILE_UMASK, false);
177 
178     if (enablePermissions) {
179       try {
180         FsPermission perm = new FsPermission(FULL_RWX_PERMISSIONS);
181         // make sure that we have a mask, if not, go default.
182         String mask = conf.get(permssionConfKey);
183         if (mask == null)
184           return FsPermission.getDefault();
185         // apply the umask
186         FsPermission umask = new FsPermission(mask);
187         return perm.applyUMask(umask);
188       } catch (IllegalArgumentException e) {
189         LOG.warn(
190             "Incorrect umask attempted to be created: "
191                 + conf.get(permssionConfKey)
192                 + ", using default file permissions.", e);
193         return FsPermission.getDefault();
194       }
195     }
196     return FsPermission.getDefault();
197   }
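
  // Worked example (hypothetical values): with HConstants.ENABLE_DATA_FILE_UMASK
  // set to true and a configured mask of "022", the starting "777" permission
  // with the "022" umask applied yields "755" (rwxr-xr-x).
  //
  //   conf.setBoolean(HConstants.ENABLE_DATA_FILE_UMASK, true);
  //   conf.set("hbase.data.umask", "022");  // key name is an assumption
  //   FsPermission perm = FSUtils.getFilePermissions(fs, conf, "hbase.data.umask");
  //   // perm.toString() -> "rwxr-xr-x"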
198 
199   /**
200    * Checks to see if the specified file system is available
201    *
202    * @param fs filesystem
203    * @throws IOException e
204    */
205   public static void checkFileSystemAvailable(final FileSystem fs)
206   throws IOException {
207     if (!(fs instanceof DistributedFileSystem)) {
208       return;
209     }
210     IOException exception = null;
211     DistributedFileSystem dfs = (DistributedFileSystem) fs;
212     try {
213       if (dfs.exists(new Path("/"))) {
214         return;
215       }
216     } catch (IOException e) {
217       exception = RemoteExceptionHandler.checkIOException(e);
218     }
219     try {
220       fs.close();
221     } catch (Exception e) {
222       LOG.error("file system close failed: ", e);
223     }
224     IOException io = new IOException("File system is not available");
225     io.initCause(exception);
226     throw io;
227   }
228 
229   /**
230    * We use reflection because {@link DistributedFileSystem#setSafeMode(FSConstants.SafeModeAction, boolean)}
231    * is not available in hadoop 1.1.
232    *
233    * @param dfs the DistributedFileSystem to query
234    * @return whether we're in safe mode
235    * @throws IOException
236    */
237   private static boolean isInSafeMode(DistributedFileSystem dfs) throws IOException {
238     boolean inSafeMode = false;
239     try {
240       Method m = DistributedFileSystem.class.getMethod("setSafeMode", new Class<?> []{
241           org.apache.hadoop.hdfs.protocol.FSConstants.SafeModeAction.class, boolean.class});
242       inSafeMode = (Boolean) m.invoke(dfs,
243         org.apache.hadoop.hdfs.protocol.FSConstants.SafeModeAction.SAFEMODE_GET, true);
244     } catch (Exception e) {
245       if (e instanceof IOException) throw (IOException) e;
246       
247       // Check whether dfs is on safemode.
248       inSafeMode = dfs.setSafeMode(
249         org.apache.hadoop.hdfs.protocol.FSConstants.SafeModeAction.SAFEMODE_GET);      
250     }
251     return inSafeMode;    
252   }
253   
254   /**
255    * Check whether dfs is in safemode.
256    * @param conf
257    * @throws IOException
258    */
259   public static void checkDfsSafeMode(final Configuration conf)
260   throws IOException {
261     boolean isInSafeMode = false;
262     FileSystem fs = FileSystem.get(conf);
263     if (fs instanceof DistributedFileSystem) {
264       DistributedFileSystem dfs = (DistributedFileSystem)fs;
265       isInSafeMode = isInSafeMode(dfs);
266     }
267     if (isInSafeMode) {
268       throw new IOException("File system is in safemode, it can't be written now");
269     }
270   }
271 
272   /**
273    * Returns the current version of the file system
274    *
275    * @param fs filesystem object
276    * @param rootdir root hbase directory
277    * @return null if no version file exists, version string otherwise.
278    * @throws IOException e
279    */
280   public static String getVersion(FileSystem fs, Path rootdir)
281   throws IOException {
282     Path versionFile = new Path(rootdir, HConstants.VERSION_FILE_NAME);
283     String version = null;
284     if (fs.exists(versionFile)) {
285       FSDataInputStream s =
286         fs.open(versionFile);
287       try {
288         version = DataInputStream.readUTF(s);
289       } catch (EOFException eof) {
290         LOG.warn("Version file was empty, odd, will try to set it.");
291       } finally {
292         s.close();
293       }
294     }
295     return version;
296   }
297 
298   /**
299    * Verifies current version of file system
300    *
301    * @param fs file system
302    * @param rootdir root directory of HBase installation
303    * @param message if true, issues a message on System.out
304    *
305    * @throws IOException e
306    */
307   public static void checkVersion(FileSystem fs, Path rootdir,
308       boolean message) throws IOException {
309     checkVersion(fs, rootdir, message, 0,
310         HConstants.DEFAULT_VERSION_FILE_WRITE_ATTEMPTS);
311   }
312 
313   /**
314    * Verifies current version of file system
315    *
316    * @param fs file system
317    * @param rootdir root directory of HBase installation
318    * @param message if true, issues a message on System.out
319    * @param wait wait interval
320    * @param retries number of times to retry
321    *
322    * @throws IOException e
323    */
324   public static void checkVersion(FileSystem fs, Path rootdir,
325       boolean message, int wait, int retries) throws IOException {
326     String version = getVersion(fs, rootdir);
327 
328     if (version == null) {
329       if (!rootRegionExists(fs, rootdir)) {
330         // rootDir is empty (no version file and no root region)
331         // just create new version file (HBASE-1195)
332         FSUtils.setVersion(fs, rootdir, wait, retries);
333         return;
334       }
335     } else if (version.compareTo(HConstants.FILE_SYSTEM_VERSION) == 0)
336         return;
337 
338     // version is deprecated, requires migration
339     // Output on stdout so user sees it in terminal.
340     String msg = "HBase file layout needs to be upgraded."
341       + "  You have version " + version
342       + " and I want version " + HConstants.FILE_SYSTEM_VERSION
343       + ".  Is your hbase.rootdir valid?  If so, you may need to run "
344       + "'hbase hbck -fixVersionFile'.";
345     if (message) {
346       System.out.println("WARNING! " + msg);
347     }
348     throw new FileSystemVersionException(msg);
349   }
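
  // Bootstrap sketch (illustrative): verify the layout version at startup,
  // printing the warning to stdout and using the default retry count with a
  // ten-second wait between attempts.
  //
  //   FSUtils.checkVersion(fs, rootdir, true, 10 * 1000,
  //       HConstants.DEFAULT_VERSION_FILE_WRITE_ATTEMPTS);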
350 
351   /**
352    * Sets version of file system
353    *
354    * @param fs filesystem object
355    * @param rootdir hbase root
356    * @throws IOException e
357    */
358   public static void setVersion(FileSystem fs, Path rootdir)
359   throws IOException {
360     setVersion(fs, rootdir, HConstants.FILE_SYSTEM_VERSION, 0,
361         HConstants.DEFAULT_VERSION_FILE_WRITE_ATTEMPTS);
362   }
363 
364   /**
365    * Sets version of file system
366    *
367    * @param fs filesystem object
368    * @param rootdir hbase root
369    * @param wait time to wait for retry
370    * @param retries number of times to retry before failing
371    * @throws IOException e
372    */
373   public static void setVersion(FileSystem fs, Path rootdir, int wait, int retries)
374   throws IOException {
375     setVersion(fs, rootdir, HConstants.FILE_SYSTEM_VERSION, wait, retries);
376   }
377 
378   /**
379    * Return the number of bytes that large input files should optimally
380    * be split into to minimize i/o time.
381    *
382    * Uses reflection to search for getDefaultBlockSize(Path f);
383    * if the method doesn't exist, falls back to getDefaultBlockSize().
384    * @param fs filesystem object
385    * @param path path of file
386    * @return the default block size for the path's filesystem
387    * @throws IOException e
388    */
389   public static long getDefaultBlockSize(final FileSystem fs, final Path path) throws IOException {
390     Method m = null;
391     Class<? extends FileSystem> cls = fs.getClass();
392     try {
393       m = cls.getMethod("getDefaultBlockSize", new Class<?>[] { Path.class });
394     } catch (NoSuchMethodException e) {
395       LOG.info("FileSystem doesn't support getDefaultBlockSize");
396     } catch (SecurityException e) {
397       LOG.info("Doesn't have access to getDefaultBlockSize on FileSystems", e);
398       m = null; // could happen on setAccessible()
399     }
400     if (m == null) {
401       return fs.getDefaultBlockSize();
402     } else {
403       try {
404         Object ret = m.invoke(fs, path);
405         return ((Long)ret).longValue();
406       } catch (Exception e) {
407         throw new IOException(e);
408       }
409     }
410   }
411 
412   /**
413    * Get the default replication.
414    *
415    * Uses reflection to search for getDefaultReplication(Path f);
416    * if the method doesn't exist, falls back to getDefaultReplication().
417    *
418    * @param fs filesystem object
419    * @param path path of file
420    * @return default replication for the path's filesystem
421    * @throws IOException e
422    */
423   public static short getDefaultReplication(final FileSystem fs, final Path path) throws IOException {
424     Method m = null;
425     Class<? extends FileSystem> cls = fs.getClass();
426     try {
427       m = cls.getMethod("getDefaultReplication", new Class<?>[] { Path.class });
428     } catch (NoSuchMethodException e) {
429       LOG.info("FileSystem doesn't support getDefaultReplication");
430     } catch (SecurityException e) {
431       LOG.info("Doesn't have access to getDefaultReplication on FileSystems", e);
432       m = null; // could happen on setAccessible()
433     }
434     if (m == null) {
435       return fs.getDefaultReplication();
436     } else {
437       try {
438         Object ret = m.invoke(fs, path);
439         return ((Number)ret).shortValue();
440       } catch (Exception e) {
441         throw new IOException(e);
442       }
443     }
444   }
445 
446   /**
447    * Returns the default buffer size to use during writes.
448    *
449    * The size of the buffer should probably be a multiple of hardware
450    * page size (4096 on Intel x86), and it determines how much data is
451    * buffered during read and write operations.
452    *
453    * @param fs filesystem object
454    * @return default buffer size to use during writes
455    */
456   public static int getDefaultBufferSize(final FileSystem fs) {
457     return fs.getConf().getInt("io.file.buffer.size", 4096);
458   }
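
  // Example (illustrative): gather the per-path defaults that file-creation
  // code would otherwise apply implicitly.
  //
  //   long blockSize = FSUtils.getDefaultBlockSize(fs, path);   // reflective, per-path when supported
  //   short replication = FSUtils.getDefaultReplication(fs, path);
  //   int bufferSize = FSUtils.getDefaultBufferSize(fs);        // io.file.buffer.size, 4096 if unset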
459 
460   /**
461    * Sets version of file system
462    *
463    * @param fs filesystem object
464    * @param rootdir hbase root directory
465    * @param version version to set
466    * @param wait time to wait for retry
467    * @param retries number of times to retry before throwing an IOException
468    * @throws IOException e
469    */
470   public static void setVersion(FileSystem fs, Path rootdir, String version,
471       int wait, int retries) throws IOException {
472     Path versionFile = new Path(rootdir, HConstants.VERSION_FILE_NAME);
473     Path tmpFile = new Path(new Path(rootdir, HConstants.HBASE_TEMP_DIRECTORY), HConstants.VERSION_FILE_NAME);
474     while (true) {
475       try {
476         FSDataOutputStream s = fs.create(tmpFile);
477         s.writeUTF(version);
478         s.close();
479         if (!fs.rename(tmpFile, versionFile)) {
480           throw new IOException("Unable to move temp version file to " + versionFile);
481         }
482         LOG.debug("Created version file at " + rootdir.toString() +
483             " set its version at:" + version);
484         return;
485       } catch (IOException e) {
486         if (retries > 0) {
487           LOG.warn("Unable to create version file at " + rootdir.toString() +
488               ", retrying: " + e.getMessage());
489           fs.delete(versionFile, false);
490           try {
491             if (wait > 0) {
492               Thread.sleep(wait);
493             }
494           } catch (InterruptedException ex) {
495             // ignore
496           }
497           retries--;
498         } else {
499           throw e;
500         }
501       }
502     }
503   }
504 
505   /**
506    * Checks that a cluster ID file exists in the HBase root directory
507    * @param fs the root directory FileSystem
508    * @param rootdir the HBase root directory in HDFS
509    * @param wait how long to wait between retries
510    * @return <code>true</code> if the file exists, otherwise <code>false</code>
511    * @throws IOException if checking the FileSystem fails
512    */
513   public static boolean checkClusterIdExists(FileSystem fs, Path rootdir,
514       int wait) throws IOException {
515     while (true) {
516       try {
517         Path filePath = new Path(rootdir, HConstants.CLUSTER_ID_FILE_NAME);
518         return fs.exists(filePath);
519       } catch (IOException ioe) {
520         if (wait > 0) {
521           LOG.warn("Unable to check cluster ID file in " + rootdir.toString() +
522               ", retrying in "+wait+"msec: "+StringUtils.stringifyException(ioe));
523           try {
524             Thread.sleep(wait);
525           } catch (InterruptedException ie) {
526             Thread.interrupted();
527             break;
528           }
529         } else {
530           throw ioe;
531         }
532       }
533     }
534     return false;
535   }
536 
537   /**
538    * Returns the value of the unique cluster ID stored for this HBase instance.
539    * @param fs the root directory FileSystem
540    * @param rootdir the path to the HBase root directory
541    * @return the unique cluster identifier
542    * @throws IOException if reading the cluster ID file fails
543    */
544   public static String getClusterId(FileSystem fs, Path rootdir)
545       throws IOException {
546     Path idPath = new Path(rootdir, HConstants.CLUSTER_ID_FILE_NAME);
547     String clusterId = null;
548     if (fs.exists(idPath)) {
549       FSDataInputStream in = fs.open(idPath);
550       try {
551         clusterId = in.readUTF();
552       } catch (EOFException eof) {
553         LOG.warn("Cluster ID file "+idPath.toString()+" was empty");
554       } finally{
555         in.close();
556       }
557     } else {
558       LOG.warn("Cluster ID file does not exist at " + idPath.toString());
559     }
560     return clusterId;
561   }
562 
563   /**
564    * Writes a new unique identifier for this cluster to the "hbase.id" file
565    * in the HBase root directory
566    * @param fs the root directory FileSystem
567    * @param rootdir the path to the HBase root directory
568    * @param clusterId the unique identifier to store
569    * @param wait how long (in milliseconds) to wait between retries
570    * @throws IOException if writing to the FileSystem fails and no wait value was given
571    */
572   public static void setClusterId(FileSystem fs, Path rootdir, String clusterId,
573       int wait) throws IOException {
574     Path idFile = new Path(rootdir, HConstants.CLUSTER_ID_FILE_NAME);
575     Path tmpFile = new Path(new Path(rootdir, HConstants.HBASE_TEMP_DIRECTORY), HConstants.CLUSTER_ID_FILE_NAME);
576     while (true) {
577       try {
578         FSDataOutputStream s = fs.create(tmpFile);
579         s.writeUTF(clusterId);
580         s.close();
581         if (!fs.rename(tmpFile, idFile)) {
582           throw new IOException("Unable to move temp cluster ID file to " + idFile);
583         }
584         if (LOG.isDebugEnabled()) {
585           LOG.debug("Created cluster ID file at " + idFfile.toString() +
586               " with ID: " + clusterId);
587         }
588         return;
589       } catch (IOException ioe) {
590         if (wait > 0) {
591           LOG.warn("Unable to create cluster ID file in " + rootdir.toString() +
592               ", retrying in "+wait+"msec: "+StringUtils.stringifyException(ioe));
593           try {
594             Thread.sleep(wait);
595           } catch (InterruptedException ie) {
596             Thread.interrupted();
597             break;
598           }
599         } else {
600           throw ioe;
601         }
602       }
603     }
604   }
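
  // Bootstrap sketch (illustrative): create the cluster ID file on first start,
  // retrying every ten seconds on transient failures.  The UUID-based ID is an
  // assumption for this example.
  //
  //   if (!FSUtils.checkClusterIdExists(fs, rootdir, 10 * 1000)) {
  //     FSUtils.setClusterId(fs, rootdir, java.util.UUID.randomUUID().toString(), 10 * 1000);
  //   }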
605 
606   /**
607    * Verifies root directory path is a valid URI with a scheme
608    *
609    * @param root root directory path
610    * @return Passed <code>root</code> argument.
611    * @throws IOException if not a valid URI with a scheme
612    */
613   public static Path validateRootPath(Path root) throws IOException {
614     try {
615       URI rootURI = new URI(root.toString());
616       String scheme = rootURI.getScheme();
617       if (scheme == null) {
618         throw new IOException("Root directory does not have a scheme");
619       }
620       return root;
621     } catch (URISyntaxException e) {
622       IOException io = new IOException("Root directory path is not a valid " +
623         "URI -- check your " + HConstants.HBASE_DIR + " configuration");
624       io.initCause(e);
625       throw io;
626     }
627   }
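
  // Example (illustrative): a fully qualified rootdir passes validation, while
  // a bare path with no scheme does not.
  //
  //   FSUtils.validateRootPath(new Path("hdfs://namenode:8020/hbase")); // returns the path
  //   FSUtils.validateRootPath(new Path("/hbase"));                     // throws IOException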
628 
629   /**
630    * If DFS, check safe mode and if so, wait until we clear it.
631    * @param conf configuration
632    * @param wait Sleep between retries
633    * @throws IOException e
634    */
635   public static void waitOnSafeMode(final Configuration conf,
636     final long wait)
637   throws IOException {
638     FileSystem fs = FileSystem.get(conf);
639     if (!(fs instanceof DistributedFileSystem)) return;
640     DistributedFileSystem dfs = (DistributedFileSystem)fs;
641     // Make sure dfs is not in safe mode
642     while (isInSafeMode(dfs)) {
643       LOG.info("Waiting for dfs to exit safe mode...");
644       try {
645         Thread.sleep(wait);
646       } catch (InterruptedException e) {
647         //continue
648       }
649     }
650   }
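
  // Startup guard sketch (illustrative): block until HDFS leaves safe mode
  // before attempting any writes under the root directory, polling every
  // ten seconds.
  //
  //   FSUtils.waitOnSafeMode(conf, 10 * 1000);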
651 
652   /**
653    * Return the 'path' component of a Path.  In Hadoop, a Path is a URI.  This
654    * method returns the 'path' component of a Path's URI: e.g. If a Path is
655    * <code>hdfs://example.org:9000/hbase_trunk/TestTable/compaction.dir</code>,
656    * this method returns <code>/hbase_trunk/TestTable/compaction.dir</code>.
657    * This method is useful if you want to print out a Path without the qualifying
658    * FileSystem instance.
659    * @param p FileSystem Path whose 'path' component we are to return.
660    * @return the 'path' portion of the Path's URI
661    */
662   public static String getPath(Path p) {
663     return p.toUri().getPath();
664   }
665 
666   /**
667    * @param c configuration
668    * @return Path to hbase root directory: i.e. <code>hbase.rootdir</code> from
669    * configuration as a qualified Path.
670    * @throws IOException e
671    */
672   public static Path getRootDir(final Configuration c) throws IOException {
673     Path p = new Path(c.get(HConstants.HBASE_DIR));
674     FileSystem fs = p.getFileSystem(c);
675     return p.makeQualified(fs);
676   }
677 
678   public static void setRootDir(final Configuration c, final Path root) throws IOException {
679     c.set(HConstants.HBASE_DIR, root.toString());
680   }
681 
682   public static void setFsDefault(final Configuration c, final Path root) throws IOException {
683     c.set("fs.defaultFS", root.toString());    // for hadoop 0.21+
684     c.set("fs.default.name", root.toString()); // for hadoop 0.20
685   }
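
  // Sketch (illustrative, e.g. for tests): point both the HBase rootdir and
  // the default filesystem at the same URI so relative Paths resolve
  // consistently.
  //
  //   Path root = new Path("hdfs://namenode:8020/hbase");
  //   FSUtils.setRootDir(conf, root);
  //   FSUtils.setFsDefault(conf, root);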
686 
687   /**
688    * Checks if root region exists
689    *
690    * @param fs file system
691    * @param rootdir root directory of HBase installation
692    * @return true if exists
693    * @throws IOException e
694    */
695   public static boolean rootRegionExists(FileSystem fs, Path rootdir)
696   throws IOException {
697     Path rootRegionDir =
698       HRegion.getRegionDir(rootdir, HRegionInfo.ROOT_REGIONINFO);
699     return fs.exists(rootRegionDir);
700   }
701 
702   /**
703    * Compute HDFS blocks distribution of a given file, or a portion of the file
704    * @param fs file system
705    * @param status file status of the file
706    * @param start start position of the portion
707    * @param length length of the portion
708    * @return The HDFS blocks distribution
709    */
710   static public HDFSBlocksDistribution computeHDFSBlocksDistribution(
711     final FileSystem fs, FileStatus status, long start, long length)
712     throws IOException {
713     HDFSBlocksDistribution blocksDistribution = new HDFSBlocksDistribution();
714     BlockLocation [] blockLocations =
715       fs.getFileBlockLocations(status, start, length);
716     for(BlockLocation bl : blockLocations) {
717       String [] hosts = bl.getHosts();
718       long len = bl.getLength();
719       blocksDistribution.addHostsAndBlockWeight(hosts, len);
720     }
721 
722     return blocksDistribution;
723   }
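
  // Usage sketch (illustrative): compute how a store file's blocks are spread
  // across hosts, e.g. to derive block locality for a region server; the
  // getBlockLocalityIndex accessor is assumed here.
  //
  //   FileStatus status = fs.getFileStatus(storeFilePath);  // storeFilePath is hypothetical
  //   HDFSBlocksDistribution dist =
  //       FSUtils.computeHDFSBlocksDistribution(fs, status, 0, status.getLen());
  //   float locality = dist.getBlockLocalityIndex(hostname);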
724 
725 
726 
727   /**
728    * Runs through the hbase rootdir and checks all stores have only
729    * one file in them -- that is, they've been major compacted.  Looks
730    * at root and meta tables too.
731    * @param fs filesystem
732    * @param hbaseRootDir hbase root directory
733    * @return True if this hbase install is major compacted.
734    * @throws IOException e
735    */
736   public static boolean isMajorCompacted(final FileSystem fs,
737       final Path hbaseRootDir)
738   throws IOException {
739     // Presumes any directory under hbase.rootdir is a table.
740     FileStatus [] tableDirs = fs.listStatus(hbaseRootDir, new DirFilter(fs));
741     for (FileStatus tableDir : tableDirs) {
742       // Skip the .log directory.  All others should be tables.  Inside a table,
743       // there are compaction.dir directories to skip.  Otherwise, all else
744       // should be regions.  Then in each region, should only be family
745       // directories.  Under each of these, should be one file only.
746       Path d = tableDir.getPath();
747       if (d.getName().equals(HConstants.HREGION_LOGDIR_NAME)) {
748         continue;
749       }
750       FileStatus[] regionDirs = fs.listStatus(d, new DirFilter(fs));
751       for (FileStatus regionDir : regionDirs) {
752         Path dd = regionDir.getPath();
753         if (dd.getName().equals(HConstants.HREGION_COMPACTIONDIR_NAME)) {
754           continue;
755         }
756         // Else its a region name.  Now look in region for families.
757         FileStatus[] familyDirs = fs.listStatus(dd, new DirFilter(fs));
758         for (FileStatus familyDir : familyDirs) {
759           Path family = familyDir.getPath();
760           // Now in family make sure only one file.
761           FileStatus[] familyStatus = fs.listStatus(family);
762           if (familyStatus.length > 1) {
763             LOG.debug(family.toString() + " has " + familyStatus.length +
764                 " files.");
765             return false;
766           }
767         }
768       }
769     }
770     return true;
771   }
772 
773   // TODO move this method OUT of FSUtils. It should not depend on HMaster.
774   /**
775    * Returns the total overall fragmentation percentage. Includes .META. and
776    * -ROOT- as well.
777    *
778    * @param master  The master defining the HBase root and file system.
779    * @return The overall fragmentation percentage, or -1 if it cannot be computed.
780    * @throws IOException When scanning the directory fails.
781    */
782   public static int getTotalTableFragmentation(final HMaster master)
783   throws IOException {
784     Map<String, Integer> map = getTableFragmentation(master);
785     return map != null && map.size() > 0 ? map.get("-TOTAL-") : -1;
786   }
787 
788   /**
789    * Runs through the HBase rootdir and checks how many stores for each table
790    * have more than one file in them. Checks -ROOT- and .META. too. The total
791    * percentage across all tables is stored under the special key "-TOTAL-".
792    *
793    * @param master  The master defining the HBase root and file system.
794    * @return A map of table name to fragmentation percentage.
795    * @throws IOException When scanning the directory fails.
796    */
797   public static Map<String, Integer> getTableFragmentation(
798     final HMaster master)
799   throws IOException {
800     Path path = getRootDir(master.getConfiguration());
801     // since HMaster.getFileSystem() is package private
802     FileSystem fs = path.getFileSystem(master.getConfiguration());
803     return getTableFragmentation(fs, path);
804   }
805 
806   /**
807    * Runs through the HBase rootdir and checks how many stores for each table
808    * have more than one file in them. Checks -ROOT- and .META. too. The total
809    * percentage across all tables is stored under the special key "-TOTAL-".
810    *
811    * @param fs  The file system to use.
812    * @param hbaseRootDir  The root directory to scan.
813    * @return A map of table name to fragmentation percentage.
814    * @throws IOException When scanning the directory fails.
815    */
816   public static Map<String, Integer> getTableFragmentation(
817     final FileSystem fs, final Path hbaseRootDir)
818   throws IOException {
819     Map<String, Integer> frags = new HashMap<String, Integer>();
820     int cfCountTotal = 0;
821     int cfFragTotal = 0;
822     DirFilter df = new DirFilter(fs);
823     // presumes any directory under hbase.rootdir is a table
824     FileStatus [] tableDirs = fs.listStatus(hbaseRootDir, df);
825     for (FileStatus tableDir : tableDirs) {
826       // Skip the .log directory.  All others should be tables.  Inside a table,
827       // there are compaction.dir directories to skip.  Otherwise, all else
828       // should be regions.  Then in each region, should only be family
829       // directories.  Under each of these, should be one file only.
830       Path d = tableDir.getPath();
831       if (d.getName().equals(HConstants.HREGION_LOGDIR_NAME)) {
832         continue;
833       }
834       int cfCount = 0;
835       int cfFrag = 0;
836       FileStatus[] regionDirs = fs.listStatus(d, df);
837       for (FileStatus regionDir : regionDirs) {
838         Path dd = regionDir.getPath();
839         if (dd.getName().equals(HConstants.HREGION_COMPACTIONDIR_NAME)) {
840           continue;
841         }
842         // else its a region name, now look in region for families
843         FileStatus[] familyDirs = fs.listStatus(dd, df);
844         for (FileStatus familyDir : familyDirs) {
845           cfCount++;
846           cfCountTotal++;
847           Path family = familyDir.getPath();
848           // now in family make sure only one file
849           FileStatus[] familyStatus = fs.listStatus(family);
850           if (familyStatus.length > 1) {
851             cfFrag++;
852             cfFragTotal++;
853           }
854         }
855       }
856       // compute percentage per table and store in result list
857       frags.put(d.getName(), Math.round((float) cfFrag / cfCount * 100));
858     }
859     // set overall percentage for all tables
860     frags.put("-TOTAL-", Math.round((float) cfFragTotal / cfCountTotal * 100));
861     return frags;
862   }
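
  // Reading the result (illustrative): per-table percentages are keyed by
  // table name, and the cluster-wide figure sits under "-TOTAL-".
  //
  //   Map<String, Integer> frags = FSUtils.getTableFragmentation(fs, rootdir);
  //   Integer overall = frags.get("-TOTAL-"); // e.g. 12 means 12% of stores have >1 file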
863 
864   /**
865    * Expects to find -ROOT- directory.
866    * @param fs filesystem
867    * @param hbaseRootDir hbase root directory
868    * @return True if this is a pre-0.20 layout.
869    * @throws IOException e
870    */
871   public static boolean isPre020FileLayout(final FileSystem fs,
872     final Path hbaseRootDir)
873   throws IOException {
874     Path mapfiles = new Path(new Path(new Path(new Path(hbaseRootDir, "-ROOT-"),
875       "70236052"), "info"), "mapfiles");
876     return fs.exists(mapfiles);
877   }
878 
879   /**
880    * Runs through the hbase rootdir and checks all stores have only
881    * one file in them -- that is, they've been major compacted.  Looks
882    * at root and meta tables too.  This version differs from
883    * {@link #isMajorCompacted(FileSystem, Path)} in that it expects a
884    * pre-0.20.0 hbase layout on the filesystem.  Used when migrating.
885    * @param fs filesystem
886    * @param hbaseRootDir hbase root directory
887    * @return True if this hbase install is major compacted.
888    * @throws IOException e
889    */
890   public static boolean isMajorCompactedPre020(final FileSystem fs,
891       final Path hbaseRootDir)
892   throws IOException {
893     // Presumes any directory under hbase.rootdir is a table.
894     FileStatus [] tableDirs = fs.listStatus(hbaseRootDir, new DirFilter(fs));
895     for (FileStatus tableDir : tableDirs) {
896       // Inside a table, there are compaction.dir directories to skip.
897       // Otherwise, all else should be regions.  Then in each region, should
898       // only be family directories.  Under each of these, should be a mapfile
899       // and info directory and in these only one file.
900       Path d = tableDir.getPath();
901       if (d.getName().equals(HConstants.HREGION_LOGDIR_NAME)) {
902         continue;
903       }
904       FileStatus[] regionDirs = fs.listStatus(d, new DirFilter(fs));
905       for (FileStatus regionDir : regionDirs) {
906         Path dd = regionDir.getPath();
907         if (dd.getName().equals(HConstants.HREGION_COMPACTIONDIR_NAME)) {
908           continue;
909         }
910         // Else its a region name.  Now look in region for families.
911         FileStatus[] familyDirs = fs.listStatus(dd, new DirFilter(fs));
912         for (FileStatus familyDir : familyDirs) {
913           Path family = familyDir.getPath();
914           FileStatus[] infoAndMapfile = fs.listStatus(family);
915           // Assert that only info and mapfile in family dir.
916           if (infoAndMapfile.length != 0 && infoAndMapfile.length != 2) {
917             LOG.debug(family.toString() +
918                 " has more than just info and mapfile: " + infoAndMapfile.length);
919             return false;
920           }
921           // Make sure directory named info or mapfile.
922           for (int ll = 0; ll < infoAndMapfile.length; ll++) {
923             if (infoAndMapfile[ll].getPath().getName().equals("info") ||
924                 infoAndMapfile[ll].getPath().getName().equals("mapfiles"))
925               continue;
926             LOG.debug("Unexpected directory name: " +
927                 infoAndMapfile[ll].getPath());
928             return false;
929           }
930           // Now in family, there are 'mapfile' and 'info' subdirs.  Just
931           // look in the 'mapfile' subdir.
932           FileStatus[] familyStatus =
933               fs.listStatus(new Path(family, "mapfiles"));
934           if (familyStatus.length > 1) {
935             LOG.debug(family.toString() + " has " + familyStatus.length +
936                 " files.");
937             return false;
938           }
939         }
940       }
941     }
942     return true;
943   }
944 
945   /**
946    * A {@link PathFilter} that returns only regular files.
947    */
948   static class FileFilter implements PathFilter {
949     private final FileSystem fs;
950 
951     public FileFilter(final FileSystem fs) {
952       this.fs = fs;
953     }
954 
955     @Override
956     public boolean accept(Path p) {
957       try {
958         return fs.isFile(p);
959       } catch (IOException e) {
960         LOG.debug("unable to verify if path=" + p + " is a regular file", e);
961         return false;
962       }
963     }
964   }
965 
966   /**
967    * A {@link PathFilter} that returns directories.
968    */
969   public static class DirFilter implements PathFilter {
970     private final FileSystem fs;
971 
972     public DirFilter(final FileSystem fs) {
973       this.fs = fs;
974     }
975 
976     @Override
977     public boolean accept(Path p) {
978       boolean isValid = false;
979       try {
980         if (HConstants.HBASE_NON_USER_TABLE_DIRS.contains(p.getName())) {
981           isValid = false;
982         } else {
983           isValid = this.fs.getFileStatus(p).isDir();
984         }
985       } catch (IOException e) {
986         LOG.warn("Failed to get status for path " + p, e);
987       }
988       return isValid;
989     }
990   }
991 
992   /**
993    * Heuristic to determine whether it is safe to open a file for append.
994    * Checks dfs.support.append and uses reflection to search
995    * for SequenceFile.Writer.syncFs() or FSDataOutputStream.hflush().
996    * @param conf configuration
997    * @return True if append is supported
998    */
999   public static boolean isAppendSupported(final Configuration conf) {
1000     boolean append = conf.getBoolean("dfs.support.append", false);
1001     if (append) {
1002       try {
1003         // TODO: The implementation that comes back when we do a createWriter
1004         // may not be using SequenceFile so the below is not a definitive test.
1005         // Will do for now (hdfs-200).
1006         SequenceFile.Writer.class.getMethod("syncFs", new Class<?> []{});
1007         append = true;
1008       } catch (SecurityException e) {
1009       } catch (NoSuchMethodException e) {
1010         append = false;
1011       }
1012     }
1013     if (!append) {
1014       // Look for the 0.21, 0.22, new-style append evidence.
1015       try {
1016         FSDataOutputStream.class.getMethod("hflush", new Class<?> []{});
1017         append = true;
1018       } catch (NoSuchMethodException e) {
1019         append = false;
1020       }
1021     }
1022     return append;
1023   }
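
  // Guard sketch (illustrative): warn at startup when the underlying
  // filesystem cannot durably sync appends, since log recovery depends on it.
  //
  //   if (!FSUtils.isAppendSupported(conf)) {
  //     LOG.warn("Append/hflush not supported; log durability is not guaranteed");
  //   }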
1024 
1025   /**
1026    * @param conf
1027    * @return True if the filesystem's scheme is 'hdfs'.
1028    * @throws IOException
1029    */
1030   public static boolean isHDFS(final Configuration conf) throws IOException {
1031     FileSystem fs = FileSystem.get(conf);
1032     String scheme = fs.getUri().getScheme();
1033     return scheme.equalsIgnoreCase("hdfs");
1034   }
1035 
1036   /**
1037    * Recover file lease. Used when a file may have been left
1038    * open by another process.
1039    * @param fs FileSystem handle
1040    * @param p Path of file to recover lease
1041    * @param conf Configuration handle
1042    * @throws IOException
1043    */
1044   public abstract void recoverFileLease(final FileSystem fs, final Path p,
1045       Configuration conf) throws IOException;
1046 
1047   /**
1048    * @param fs
1049    * @param rootdir
1050    * @return All the table directories under <code>rootdir</code>. Ignores non-table
1051    * hbase directories such as .logs, .oldlogs, .corrupt, .META., and -ROOT-.
1052    * @throws IOException
1053    */
1054   public static List<Path> getTableDirs(final FileSystem fs, final Path rootdir)
1055   throws IOException {
1056     // presumes any directory under hbase.rootdir is a table
1057     FileStatus [] dirs = fs.listStatus(rootdir, new DirFilter(fs));
1058     List<Path> tabledirs = new ArrayList<Path>(dirs.length);
1059     for (FileStatus dir: dirs) {
1060       Path p = dir.getPath();
1061       String tableName = p.getName();
1062       if (!HConstants.HBASE_NON_USER_TABLE_DIRS.contains(tableName)) {
1063         tabledirs.add(p);
1064       }
1065     }
1066     return tabledirs;
1067   }
1068 
1069   public static Path getTablePath(Path rootdir, byte [] tableName) {
1070     return getTablePath(rootdir, Bytes.toString(tableName));
1071   }
1072 
1073   public static Path getTablePath(Path rootdir, final String tableName) {
1074     return new Path(rootdir, tableName);
1075   }
1076 
1077   /**
1078    * Filter that accepts region directories, i.e. directories whose names match the region dir pattern.
1079    */
1080   public static class RegionDirFilter implements PathFilter {
1081     // This pattern will accept 0.90+ style hex region dirs and older numeric region dir names.
1082     final public static Pattern regionDirPattern = Pattern.compile("^[0-9a-f]*$");
1083     final FileSystem fs;
1084 
1085     public RegionDirFilter(FileSystem fs) {
1086       this.fs = fs;
1087     }
1088 
1089     @Override
1090     public boolean accept(Path rd) {
1091       if (!regionDirPattern.matcher(rd.getName()).matches()) {
1092         return false;
1093       }
1094 
1095       try {
1096         return fs.getFileStatus(rd).isDir();
1097       } catch (IOException ioe) {
1098         // Maybe the file was moved or the fs was disconnected.
1099         LOG.warn("Skipping file " + rd +" due to IOException", ioe);
1100         return false;
1101       }
1102     }
1103   }
1104 
1105   /**
1106    * Given a particular table dir, return all the regiondirs inside it, excluding files such as
1107    * .tableinfo
1108    * @param fs A file system for the Path
1109    * @param tableDir Path to a specific table directory <hbase.rootdir>/<tabledir>
1110    * @return List of paths to valid region directories in table dir.
1111    * @throws IOException
1112    */
1113   public static List<Path> getRegionDirs(final FileSystem fs, final Path tableDir) throws IOException {
1114     // assumes we are in a table dir.
1115     FileStatus[] rds = fs.listStatus(tableDir, new RegionDirFilter(fs));
1116     List<Path> regionDirs = new ArrayList<Path>(rds.length);
1117     for (FileStatus rdfs: rds) {
1118       Path rdPath = rdfs.getPath();
1119       regionDirs.add(rdPath);
1120     }
1121     return regionDirs;
1122   }
1123 
1124   /**
1125    * Filter for all dirs that are legal column family names.  This is generally used for colfam
1126    * dirs <hbase.rootdir>/<tabledir>/<regiondir>/<colfamdir>.
1127    */
1128   public static class FamilyDirFilter implements PathFilter {
1129     final FileSystem fs;
1130 
1131     public FamilyDirFilter(FileSystem fs) {
1132       this.fs = fs;
1133     }
1134 
1135     @Override
1136     public boolean accept(Path rd) {
1137       try {
1138         // throws IAE if invalid
1139         HColumnDescriptor.isLegalFamilyName(Bytes.toBytes(rd.getName()));
1140       } catch (IllegalArgumentException iae) {
1141         // path name is an invalid family name and thus is excluded.
1142         return false;
1143       }
1144 
1145       try {
1146         return fs.getFileStatus(rd).isDir();
1147       } catch (IOException ioe) {
1148         // Maybe the file was moved or the fs was disconnected.
1149         LOG.warn("Skipping file " + rd +" due to IOException", ioe);
1150         return false;
1151       }
1152     }
1153   }
1154 
1155   /**
1156    * Given a particular region dir, return all the familydirs inside it
1157    *
1158    * @param fs A file system for the Path
1159    * @param regionDir Path to a specific region directory
1160    * @return List of paths to valid family directories in region dir.
1161    * @throws IOException
1162    */
1163   public static List<Path> getFamilyDirs(final FileSystem fs, final Path regionDir) throws IOException {
1164     // assumes we are in a region dir.
1165     FileStatus[] fds = fs.listStatus(regionDir, new FamilyDirFilter(fs));
1166     List<Path> familyDirs = new ArrayList<Path>(fds.length);
1167     for (FileStatus fdfs: fds) {
1168       Path fdPath = fdfs.getPath();
1169       familyDirs.add(fdPath);
1170     }
1171     return familyDirs;
1172   }
1173 
1174   /**
1175    * Filter for HFiles that excludes reference files.
1176    */
1177   public static class HFileFilter implements PathFilter {
1178     // This pattern will accept 0.90+ style hex hfile names but reject reference files
1179     final public static Pattern hfilePattern = Pattern.compile("^([0-9a-f]+)$");
1180 
1181     final FileSystem fs;
1182 
1183     public HFileFilter(FileSystem fs) {
1184       this.fs = fs;
1185     }
1186 
1187     @Override
1188     public boolean accept(Path rd) {
1189       if (!hfilePattern.matcher(rd.getName()).matches()) {
1190         return false;
1191       }
1192 
1193       try {
1194         // only files
1195         return !fs.getFileStatus(rd).isDir();
1196       } catch (IOException ioe) {
1197         // Maybe the file was moved or the fs was disconnected.
1198         LOG.warn("Skipping file " + rd +" due to IOException", ioe);
1199         return false;
1200       }
1201     }
1202   }
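
  // Directory-walk sketch (illustrative): enumerate every hfile under a table
  // with the filters above, mirroring the layout
  // <hbase.rootdir>/<table>/<region>/<family>/<hfile>.
  //
  //   for (Path regionDir : FSUtils.getRegionDirs(fs, tableDir)) {
  //     for (Path familyDir : FSUtils.getFamilyDirs(fs, regionDir)) {
  //       for (FileStatus hfile : fs.listStatus(familyDir, new HFileFilter(fs))) {
  //         LOG.info("hfile: " + hfile.getPath());
  //       }
  //     }
  //   }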
1203 
1204   /**
1205    * @param conf
1206    * @return Returns the filesystem of the hbase rootdir.
1207    * @throws IOException
1208    */
1209   public static FileSystem getCurrentFileSystem(Configuration conf)
1210   throws IOException {
1211     return getRootDir(conf).getFileSystem(conf);
1212   }
1213 
1214   /**
1215    * Runs through the HBase rootdir and creates a reverse lookup map for
1216    * table StoreFile names to the full Path.
1217    * <br>
1218    * Example...<br>
1219    * Key = 3944417774205889744  <br>
1220    * Value = hdfs://localhost:51169/user/userid/-ROOT-/70236052/info/3944417774205889744
1221    *
1222    * @param fs  The file system to use.
1223    * @param hbaseRootDir  The root directory to scan.
1224    * @return Map keyed by StoreFile name with a value of the full Path.
1225    * @throws IOException When scanning the directory fails.
1226    */
1227   public static Map<String, Path> getTableStoreFilePathMap(
1228     final FileSystem fs, final Path hbaseRootDir)
1229   throws IOException {
1230     Map<String, Path> map = new HashMap<String, Path>();
1231     
1232     // if this method looks similar to 'getTableFragmentation' that is because 
1233     // it was borrowed from it.
1234     
1235     DirFilter df = new DirFilter(fs);
1236     // presumes any directory under hbase.rootdir is a table
1237     FileStatus [] tableDirs = fs.listStatus(hbaseRootDir, df);
1238     for (FileStatus tableDir : tableDirs) {
1239       // Skip the .log and other non-table directories.  All others should be tables.
1240       // Inside a table, there are compaction.dir directories to skip.  Otherwise, all else
1241       // should be regions. 
1242       Path d = tableDir.getPath();
1243       if (HConstants.HBASE_NON_TABLE_DIRS.contains(d.getName())) {
1244         continue;
1245       }
1246       FileStatus[] regionDirs = fs.listStatus(d, df);
1247       for (FileStatus regionDir : regionDirs) {
1248         Path dd = regionDir.getPath();
1249         if (dd.getName().equals(HConstants.HREGION_COMPACTIONDIR_NAME)) {
1250           continue;
1251         }
1252         // else its a region name, now look in region for families
1253         FileStatus[] familyDirs = fs.listStatus(dd, df);
1254         for (FileStatus familyDir : familyDirs) {
1255           Path family = familyDir.getPath();
1256           // now in family, iterate over the StoreFiles and
1257           // put in map
1258           FileStatus[] familyStatus = fs.listStatus(family);
1259           for (FileStatus sfStatus : familyStatus) {
1260             Path sf = sfStatus.getPath();
1261             map.put( sf.getName(), sf);
1262           }
1263 
1264         }
1265       }
1266     }
1267     return map;
1268   }
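
  // Usage sketch (illustrative): resolve a bare store file name back to its
  // full path, e.g. when cross-checking references in an fsck-style scan.
  //
  //   Map<String, Path> nameToPath = FSUtils.getTableStoreFilePathMap(fs, rootdir);
  //   Path full = nameToPath.get("3944417774205889744"); // name from the javadoc example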
1269 
1270   /**
1271    * Calls fs.listStatus() and treats FileNotFoundException as non-fatal.
1272    * This accommodates differences between hadoop versions.
1273    *
1274    * @param fs file system
1275    * @param dir directory
1276    * @param filter path filter
1277    * @return null if dir is empty or doesn't exist, otherwise FileStatus array
1278    */
1279   public static FileStatus [] listStatus(final FileSystem fs,
1280       final Path dir, final PathFilter filter) throws IOException {
1281     FileStatus [] status = null;
1282     try {
1283       status = filter == null ? fs.listStatus(dir) : fs.listStatus(dir, filter);
1284     } catch (FileNotFoundException fnfe) {
1285       // if directory doesn't exist, return null
1286       LOG.debug(dir + " doesn't exist");
1287     }
1288     if (status == null || status.length < 1) return null;
1289     return status;
1290   }
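
  // Caller sketch (illustrative): unlike raw FileSystem.listStatus(), a
  // missing or empty directory yields null here, so guard before iterating.
  //
  //   FileStatus[] entries = FSUtils.listStatus(fs, dir, new DirFilter(fs));
  //   if (entries != null) {
  //     for (FileStatus entry : entries) { /* ... */ }
  //   }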
1291 
1292   /**
1293    * Calls fs.listStatus() and treats FileNotFoundException as non-fatal.
1294    * This accommodates differences between hadoop versions.
1295    *
1296    * @param fs file system
1297    * @param dir directory
1298    * @return null if dir is empty or doesn't exist, otherwise FileStatus array
1299    */
1300   public static FileStatus[] listStatus(final FileSystem fs, final Path dir) throws IOException {
1301     return listStatus(fs, dir, null);
1302   }
1303 
1304   /**
1305    * Calls fs.delete() and returns the value returned by fs.delete().
1306    *
1307    * @param fs filesystem object
1308    * @param path path to delete
1309    * @param recursive whether to delete recursively
1310    * @return true if the delete succeeded
1311    * @throws IOException
1312    */
1313   public static boolean delete(final FileSystem fs, final Path path, final boolean recursive)
1314       throws IOException {
1315     return fs.delete(path, recursive);
1316   }
1317 
1318   /**
1319    * Throw an exception if an action is not permitted by a user on a file.
1320    * 
1321    * @param user
1322    *          the user
1323    * @param file
1324    *          the file
1325    * @param action
1326    *          the action
1327    */
1328   public static void checkAccess(User user, FileStatus file,
1329       FsAction action) throws AccessControlException {
1330     // See HBASE-7814. UserGroupInformation from hadoop 0.20.x may not support getShortName().
1331     String username = user.getShortName();
1332     if (username.equals(file.getOwner())) {
1333       if (file.getPermission().getUserAction().implies(action)) {
1334         return;
1335       }
1336     } else if (contains(user.getGroupNames(), file.getGroup())) {
1337       if (file.getPermission().getGroupAction().implies(action)) {
1338         return;
1339       }
1340     } else if (file.getPermission().getOtherAction().implies(action)) {
1341       return;
1342     }
1343     throw new AccessControlException("Permission denied:" + " action=" + action
1344         + " path=" + file.getPath() + " user=" + username);
1345   }
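
  // Usage sketch (illustrative): verify the current user may read a file
  // before opening it; an AccessControlException is thrown on denial.
  //
  //   User user = User.getCurrent();  // assumes this factory method
  //   FSUtils.checkAccess(user, fs.getFileStatus(p), FsAction.READ);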
1346 
1347   private static boolean contains(String[] groups, String user) {
1348     for (String group : groups) {
1349       if (group.equals(user)) {
1350         return true;
1351       }
1352     }
1353     return false;
1354   }
1355 
1356   /**
1357    * Calls fs.exists(). Checks if the specified path exists.
1358    *
1359    * @param fs filesystem object
1360    * @param path path to check
1361    * @return true if the path exists
1362    * @throws IOException
1363    */
1364   public static boolean isExists(final FileSystem fs, final Path path) throws IOException {
1365     return fs.exists(path);
1366   }
1367 
1368   /**
1369    * Log the current state of the filesystem from a certain root directory
1370    * @param fs filesystem to investigate
1371    * @param root root file/directory to start logging from
1372    * @param LOG log to output information
1373    * @throws IOException if an unexpected exception occurs
1374    */
1375   public static void logFileSystemState(final FileSystem fs, final Path root, Log LOG)
1376       throws IOException {
1377     LOG.debug("Current file system:");
1378     logFSTree(LOG, fs, root, "|-");
1379   }
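
  // Debugging sketch (illustrative): dump the rootdir tree to the log, e.g.
  // from a test, using this class's own logger.
  //
  //   FSUtils.logFileSystemState(fs, FSUtils.getRootDir(conf), LOG);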
1380 
1381   /**
1382    * Recursive helper to log the state of the FS
1383    * @see #logFileSystemState(FileSystem, Path, Log)
1384    */
1385   private static void logFSTree(Log LOG, final FileSystem fs, final Path root, String prefix)
1386       throws IOException {
1387     FileStatus[] files = FSUtils.listStatus(fs, root, null);
1388     if (files == null) return;
1389 
1390     for (FileStatus file : files) {
1391       if (file.isDir()) {
1392         LOG.debug(prefix + file.getPath().getName() + "/");
1393         logFSTree(LOG, fs, file.getPath(), prefix + "---");
1394       } else {
1395         LOG.debug(prefix + file.getPath().getName());
1396       }
1397     }
1398   }
1399 }