
1   /**
2    *
3    * Licensed to the Apache Software Foundation (ASF) under one
4    * or more contributor license agreements.  See the NOTICE file
5    * distributed with this work for additional information
6    * regarding copyright ownership.  The ASF licenses this file
7    * to you under the Apache License, Version 2.0 (the
8    * "License"); you may not use this file except in compliance
9    * with the License.  You may obtain a copy of the License at
10   *
11   *     http://www.apache.org/licenses/LICENSE-2.0
12   *
13   * Unless required by applicable law or agreed to in writing, software
14   * distributed under the License is distributed on an "AS IS" BASIS,
15   * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
16   * See the License for the specific language governing permissions and
17   * limitations under the License.
18   */
19  package org.apache.hadoop.hbase.util;
20  
21  import java.io.ByteArrayInputStream;
22  import java.io.DataInputStream;
23  import java.io.EOFException;
24  import java.io.FileNotFoundException;
25  import java.io.IOException;
26  import java.io.InputStream;
27  import java.io.InterruptedIOException;
28  import java.lang.reflect.InvocationTargetException;
29  import java.lang.reflect.Method;
30  import java.net.InetSocketAddress;
31  import java.net.URI;
32  import java.net.URISyntaxException;
33  import java.util.ArrayList;
34  import java.util.Collections;
35  import java.util.HashMap;
36  import java.util.LinkedList;
37  import java.util.List;
38  import java.util.Map;
39  import java.util.concurrent.ArrayBlockingQueue;
40  import java.util.concurrent.ConcurrentHashMap;
41  import java.util.concurrent.ThreadPoolExecutor;
42  import java.util.concurrent.TimeUnit;
43  import java.util.regex.Pattern;
44  
45  import org.apache.commons.logging.Log;
46  import org.apache.commons.logging.LogFactory;
47  import org.apache.hadoop.hbase.classification.InterfaceAudience;
48  import org.apache.hadoop.conf.Configuration;
49  import org.apache.hadoop.fs.BlockLocation;
50  import org.apache.hadoop.fs.FSDataInputStream;
51  import org.apache.hadoop.fs.FSDataOutputStream;
52  import org.apache.hadoop.fs.FileStatus;
53  import org.apache.hadoop.fs.FileSystem;
54  import org.apache.hadoop.fs.Path;
55  import org.apache.hadoop.fs.PathFilter;
56  import org.apache.hadoop.fs.permission.FsAction;
57  import org.apache.hadoop.fs.permission.FsPermission;
58  import org.apache.hadoop.hbase.ClusterId;
59  import org.apache.hadoop.hbase.HColumnDescriptor;
60  import org.apache.hadoop.hbase.HConstants;
61  import org.apache.hadoop.hbase.HDFSBlocksDistribution;
62  import org.apache.hadoop.hbase.HRegionInfo;
63  import org.apache.hadoop.hbase.TableName;
64  import org.apache.hadoop.hbase.exceptions.DeserializationException;
65  import org.apache.hadoop.hbase.fs.HFileSystem;
66  import org.apache.hadoop.hbase.master.HMaster;
67  import org.apache.hadoop.hbase.regionserver.StoreFileInfo;
68  import org.apache.hadoop.hbase.security.AccessDeniedException;
69  import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
70  import org.apache.hadoop.hbase.protobuf.generated.FSProtos;
71  import org.apache.hadoop.hbase.regionserver.HRegion;
72  import org.apache.hadoop.hdfs.DFSClient;
73  import org.apache.hadoop.hdfs.DFSHedgedReadMetrics;
74  import org.apache.hadoop.hdfs.DistributedFileSystem;
75  import org.apache.hadoop.io.IOUtils;
76  import org.apache.hadoop.io.SequenceFile;
77  import org.apache.hadoop.ipc.RemoteException;
78  import org.apache.hadoop.security.UserGroupInformation;
79  import org.apache.hadoop.util.Progressable;
80  import org.apache.hadoop.util.ReflectionUtils;
81  import org.apache.hadoop.util.StringUtils;
82  
83  import com.google.common.primitives.Ints;
84  import com.google.protobuf.InvalidProtocolBufferException;
85  
86  /**
87   * Utility methods for interacting with the underlying file system.
88   */
89  @InterfaceAudience.Private
90  public abstract class FSUtils {
91    private static final Log LOG = LogFactory.getLog(FSUtils.class);
92  
93    /** Full access permissions (starting point for a umask) */
94    public static final String FULL_RWX_PERMISSIONS = "777";
95    private static final String THREAD_POOLSIZE = "hbase.client.localityCheck.threadPoolSize";
96    private static final int DEFAULT_THREAD_POOLSIZE = 2;
97  
98    /** Set to true on Windows platforms */
99    public static final boolean WINDOWS = System.getProperty("os.name").startsWith("Windows");
100 
101   protected FSUtils() {
102     super();
103   }
104 
105   /**
106    * Compares the path component only. Does not consider the scheme; i.e. if the schemes differ
107    * but <code>path</code> starts with <code>rootPath</code>, this method still returns true.
108    * @param rootPath root path to match against
109    * @param path path to check
110    * @return True if <code>path</code> starts with <code>rootPath</code>
111    */
112   public static boolean isStartingWithPath(final Path rootPath, final String path) {
113     String uriRootPath = rootPath.toUri().getPath();
114     String tailUriPath = (new Path(path)).toUri().getPath();
115     return tailUriPath.startsWith(uriRootPath);
116   }
117 
118   /**
119    * Compares the path component of the Path URI; e.g. if hdfs://a/b/c and /a/b/c, it will compare
120    * the '/a/b/c' part. Does not consider the scheme; i.e. if the schemes differ but the path or
121    * subpath matches, the two equate.
122    * @param pathToSearch Path we will be trying to match.
123    * @param pathTail the tail to look for
124    * @return True if <code>pathTail</code> is the tail of the path of <code>pathToSearch</code>
125    */
126   public static boolean isMatchingTail(final Path pathToSearch, String pathTail) {
127     return isMatchingTail(pathToSearch, new Path(pathTail));
128   }
129 
130   /**
131    * Compares the path component of the Path URI; e.g. if hdfs://a/b/c and /a/b/c, it will compare
132    * the '/a/b/c' part. If you passed in 'hdfs://a/b/c' and 'b/c', it would return true.  Does not
133    * consider the scheme; i.e. if the schemes differ but the path or subpath matches, the two equate.
134    * @param pathToSearch Path we will be trying to match.
135    * @param pathTail the tail to look for
136    * @return True if <code>pathTail</code> is the tail of the path of <code>pathToSearch</code>
137    */
138   public static boolean isMatchingTail(final Path pathToSearch, final Path pathTail) {
139     if (pathToSearch.depth() != pathTail.depth()) return false;
140     Path tailPath = pathTail;
141     String tailName;
142     Path toSearch = pathToSearch;
143     String toSearchName;
144     boolean result = false;
145     do {
146       tailName = tailPath.getName();
147       if (tailName == null || tailName.length() <= 0) {
148         result = true;
149         break;
150       }
151       toSearchName = toSearch.getName();
152       if (toSearchName == null || toSearchName.length() <= 0) break;
153       // Move up a parent on each path for next go around.  Path doesn't let us go off the end.
154       tailPath = tailPath.getParent();
155       toSearch = toSearch.getParent();
156     } while(tailName.equals(toSearchName));
157     return result;
158   }
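      // Illustrative usage sketch (not part of the upstream class): comparing path tails
      // regardless of scheme. The namenode URI below is hypothetical.
      //
      //   boolean sameTail = FSUtils.isMatchingTail(
      //       new Path("hdfs://namenode:8020/a/b/c"), new Path("/a/b/c"));                  // true
      //   boolean underRoot = FSUtils.isStartingWithPath(
      //       new Path("/hbase"), "hdfs://namenode:8020/hbase/data/default/t1");            // true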
159 
160   public static FSUtils getInstance(FileSystem fs, Configuration conf) {
161     String scheme = fs.getUri().getScheme();
162     if (scheme == null) {
163       LOG.warn("Could not find scheme for uri " +
164           fs.getUri() + ", default to hdfs");
165       scheme = "hdfs";
166     }
167     Class<?> fsUtilsClass = conf.getClass("hbase.fsutil." +
168         scheme + ".impl", FSHDFSUtils.class); // Default to HDFS impl
169     FSUtils fsUtils = (FSUtils)ReflectionUtils.newInstance(fsUtilsClass, conf);
170     return fsUtils;
171   }
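      // Illustrative usage sketch (not part of the upstream class): obtaining the
      // scheme-specific FSUtils implementation and recovering a WAL file lease. The WAL
      // path and the null reporter are hypothetical; callers may pass a real
      // CancelableProgressable instead of null.
      //
      //   FileSystem fs = FileSystem.get(conf);
      //   FSUtils fsUtils = FSUtils.getInstance(fs, conf);
      //   fsUtils.recoverFileLease(fs, new Path("/hbase/WALs/rs1/wal.123"), conf, null);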
172 
173   /**
174    * Delete if exists.
175    * @param fs filesystem object
176    * @param dir directory to delete
177    * @return True if deleted <code>dir</code>
178    * @throws IOException e
179    */
180   public static boolean deleteDirectory(final FileSystem fs, final Path dir)
181   throws IOException {
182     return fs.exists(dir) && fs.delete(dir, true);
183   }
184 
185   /**
186    * Return the number of bytes that large input files should optimally
187    * be split into to minimize i/o time.
188    *
189    * Uses reflection to search for getDefaultBlockSize(Path f);
190    * if the method doesn't exist, falls back to getDefaultBlockSize().
191    *
192    * @param fs filesystem object
193    * @return the default block size for the path's filesystem
194    * @throws IOException e
195    */
196   public static long getDefaultBlockSize(final FileSystem fs, final Path path) throws IOException {
197     Method m = null;
198     Class<? extends FileSystem> cls = fs.getClass();
199     try {
200       m = cls.getMethod("getDefaultBlockSize", new Class<?>[] { Path.class });
201     } catch (NoSuchMethodException e) {
202       LOG.info("FileSystem doesn't support getDefaultBlockSize");
203     } catch (SecurityException e) {
204       LOG.info("Doesn't have access to getDefaultBlockSize on FileSystems", e);
205       m = null; // could happen on setAccessible()
206     }
207     if (m == null) {
208       return fs.getDefaultBlockSize(path);
209     } else {
210       try {
211         Object ret = m.invoke(fs, path);
212         return ((Long)ret).longValue();
213       } catch (Exception e) {
214         throw new IOException(e);
215       }
216     }
217   }
218 
219   /**
220    * Get the default replication.
221    *
222    * Uses reflection to search for getDefaultReplication(Path f);
223    * if the method doesn't exist, falls back to getDefaultReplication().
224    *
225    * @param fs filesystem object
226    * @param f path of file
227    * @return default replication for the path's filesystem
228    * @throws IOException e
229    */
230   public static short getDefaultReplication(final FileSystem fs, final Path path) throws IOException {
231     Method m = null;
232     Class<? extends FileSystem> cls = fs.getClass();
233     try {
234       m = cls.getMethod("getDefaultReplication", new Class<?>[] { Path.class });
235     } catch (NoSuchMethodException e) {
236       LOG.info("FileSystem doesn't support getDefaultReplication");
237     } catch (SecurityException e) {
238       LOG.info("Doesn't have access to getDefaultReplication on FileSystems", e);
239       m = null; // could happen on setAccessible()
240     }
241     if (m == null) {
242       return fs.getDefaultReplication(path);
243     } else {
244       try {
245         Object ret = m.invoke(fs, path);
246         return ((Number)ret).shortValue();
247       } catch (Exception e) {
248         throw new IOException(e);
249       }
250     }
251   }
252 
253   /**
254    * Returns the default buffer size to use during writes.
255    *
256    * The size of the buffer should probably be a multiple of hardware
257    * page size (4096 on Intel x86), and it determines how much data is
258    * buffered during read and write operations.
259    *
260    * @param fs filesystem object
261    * @return default buffer size to use during writes
262    */
263   public static int getDefaultBufferSize(final FileSystem fs) {
264     return fs.getConf().getInt("io.file.buffer.size", 4096);
265   }
266 
267   /**
268    * Create the specified file on the filesystem. By default, this will:
269    * <ol>
270    * <li>overwrite the file if it exists</li>
271    * <li>apply the umask in the configuration (if it is enabled)</li>
272    * <li>use the fs configured buffer size (or 4096 if not set)</li>
273    * <li>use the default replication</li>
274    * <li>use the default block size</li>
275    * <li>not track progress</li>
276    * </ol>
277    *
278    * @param fs {@link FileSystem} on which to write the file
279    * @param path {@link Path} to the file to write
280    * @param perm permissions
281    * @param favoredNodes nodes that the created blocks should be placed on, if possible
282    * @return output stream to the created file
283    * @throws IOException if the file cannot be created
284    */
285   public static FSDataOutputStream create(FileSystem fs, Path path,
286       FsPermission perm, InetSocketAddress[] favoredNodes) throws IOException {
287     if (fs instanceof HFileSystem) {
288       FileSystem backingFs = ((HFileSystem)fs).getBackingFs();
289       if (backingFs instanceof DistributedFileSystem) {
290         // Try to use the favoredNodes version via reflection to allow backwards-
291         // compatibility.
292         try {
293           return (FSDataOutputStream) (DistributedFileSystem.class
294               .getDeclaredMethod("create", Path.class, FsPermission.class,
295                   boolean.class, int.class, short.class, long.class,
296                   Progressable.class, InetSocketAddress[].class)
297                   .invoke(backingFs, path, perm, true,
298                       getDefaultBufferSize(backingFs),
299                       getDefaultReplication(backingFs, path),
300                       getDefaultBlockSize(backingFs, path),
301                       null, favoredNodes));
302         } catch (InvocationTargetException ite) {
303           // Function was properly called, but threw its own exception.
304           throw new IOException(ite.getCause());
305         } catch (NoSuchMethodException e) {
306           LOG.debug("DFS Client does not support most favored nodes create; using default create");
307           if (LOG.isTraceEnabled()) LOG.trace("Ignoring; use default create", e);
308         } catch (IllegalArgumentException e) {
309           LOG.debug("Ignoring (most likely Reflection related exception) " + e);
310         } catch (SecurityException e) {
311           LOG.debug("Ignoring (most likely Reflection related exception) " + e);
312         } catch (IllegalAccessException e) {
313           LOG.debug("Ignoring (most likely Reflection related exception) " + e);
314         }
315       }
316     }
317     return create(fs, path, perm, true);
318   }
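      // Illustrative usage sketch (not part of the upstream class): creating a file with
      // favored nodes so block replicas land near the region's servers. The host names,
      // port, and path are hypothetical; on non-HDFS filesystems this silently falls back
      // to a plain create.
      //
      //   InetSocketAddress[] favored = new InetSocketAddress[] {
      //       new InetSocketAddress("rs1.example.com", 50010),
      //       new InetSocketAddress("rs2.example.com", 50010) };
      //   FSDataOutputStream out = FSUtils.create(fs, new Path("/hbase/.tmp/f1"),
      //       new FsPermission("644"), favored);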
319 
320   /**
321    * Create the specified file on the filesystem. By default, this will:
322    * <ol>
323    * <li>apply the umask in the configuration (if it is enabled)</li>
324    * <li>use the fs configured buffer size (or 4096 if not set)</li>
325    * <li>use the default replication</li>
326    * <li>use the default block size</li>
327    * <li>not track progress</li>
328    * </ol>
329    *
330    * @param fs {@link FileSystem} on which to write the file
331    * @param path {@link Path} to the file to write
332    * @param perm permissions to apply to the created file
333    * @param overwrite Whether or not an existing file at this path should be overwritten.
334    * @return output stream to the created file
335    * @throws IOException if the file cannot be created
336    */
337   public static FSDataOutputStream create(FileSystem fs, Path path,
338       FsPermission perm, boolean overwrite) throws IOException {
339     if (LOG.isTraceEnabled()) {
340       LOG.trace("Creating file=" + path + " with permission=" + perm + ", overwrite=" + overwrite);
341     }
342     return fs.create(path, perm, overwrite, getDefaultBufferSize(fs),
343         getDefaultReplication(fs, path), getDefaultBlockSize(fs, path), null);
344   }
345 
346   /**
347    * Get the file permissions specified in the configuration, if they are
348    * enabled.
349    *
350    * @param fs filesystem that the file will be created on.
351    * @param conf configuration to read for determining if permissions are
352    *          enabled and which to use
353    * @param permssionConfKey property key in the configuration to use when
354    *          finding the permission
355    * @return the permission to use when creating a new file on the fs. If
356    *         special permissions are not specified in the configuration, then
357    *         the default permissions on the fs will be returned.
358    */
359   public static FsPermission getFilePermissions(final FileSystem fs,
360       final Configuration conf, final String permssionConfKey) {
361     boolean enablePermissions = conf.getBoolean(
362         HConstants.ENABLE_DATA_FILE_UMASK, false);
363 
364     if (enablePermissions) {
365       try {
366         FsPermission perm = new FsPermission(FULL_RWX_PERMISSIONS);
367         // make sure that we have a mask, if not, go default.
368         String mask = conf.get(permssionConfKey);
369         if (mask == null)
370           return FsPermission.getFileDefault();
371         // apply the umask
372         FsPermission umask = new FsPermission(mask);
373         return perm.applyUMask(umask);
374       } catch (IllegalArgumentException e) {
375         LOG.warn(
376             "Incorrect umask attempted to be created: "
377                 + conf.get(permssionConfKey)
378                 + ", using default file permissions.", e);
379         return FsPermission.getFileDefault();
380       }
381     }
382     return FsPermission.getFileDefault();
383   }
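      // Illustrative usage sketch (not part of the upstream class): resolving the effective
      // file permission from configuration before creating a file. The umask key name
      // "hbase.data.umask" is an assumption used only for illustration.
      //
      //   conf.setBoolean(HConstants.ENABLE_DATA_FILE_UMASK, true);
      //   conf.set("hbase.data.umask", "000");
      //   FsPermission perm = FSUtils.getFilePermissions(fs, conf, "hbase.data.umask");
      //   FSDataOutputStream out = FSUtils.create(fs, new Path("/hbase/.tmp/example"), perm, true);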
384 
385   /**
386    * Checks to see if the specified file system is available
387    *
388    * @param fs filesystem
389    * @throws IOException e
390    */
391   public static void checkFileSystemAvailable(final FileSystem fs)
392   throws IOException {
393     if (!(fs instanceof DistributedFileSystem)) {
394       return;
395     }
396     IOException exception = null;
397     DistributedFileSystem dfs = (DistributedFileSystem) fs;
398     try {
399       if (dfs.exists(new Path("/"))) {
400         return;
401       }
402     } catch (IOException e) {
403       exception = e instanceof RemoteException ?
404               ((RemoteException)e).unwrapRemoteException() : e;
405     }
406     try {
407       fs.close();
408     } catch (Exception e) {
409       LOG.error("file system close failed: ", e);
410     }
411     IOException io = new IOException("File system is not available");
412     io.initCause(exception);
413     throw io;
414   }
415 
416   /**
417    * We use reflection because {@link DistributedFileSystem#setSafeMode(
418    * FSConstants.SafeModeAction action, boolean isChecked)} is not in hadoop 1.1
419    *
420    * @param dfs
421    * @return whether we're in safe mode
422    * @throws IOException
423    */
424   private static boolean isInSafeMode(DistributedFileSystem dfs) throws IOException {
425     boolean inSafeMode = false;
426     try {
427       Method m = DistributedFileSystem.class.getMethod("setSafeMode", new Class<?> []{
428           org.apache.hadoop.hdfs.protocol.FSConstants.SafeModeAction.class, boolean.class});
429       inSafeMode = (Boolean) m.invoke(dfs,
430         org.apache.hadoop.hdfs.protocol.FSConstants.SafeModeAction.SAFEMODE_GET, true);
431     } catch (Exception e) {
432       if (e instanceof IOException) throw (IOException) e;
433 
434       // Check whether dfs is on safemode.
435       inSafeMode = dfs.setSafeMode(
436         org.apache.hadoop.hdfs.protocol.FSConstants.SafeModeAction.SAFEMODE_GET);
437     }
438     return inSafeMode;
439   }
440 
441   /**
442    * Check whether dfs is in safemode.
443    * @param conf
444    * @throws IOException
445    */
446   public static void checkDfsSafeMode(final Configuration conf)
447   throws IOException {
448     boolean isInSafeMode = false;
449     FileSystem fs = FileSystem.get(conf);
450     if (fs instanceof DistributedFileSystem) {
451       DistributedFileSystem dfs = (DistributedFileSystem)fs;
452       isInSafeMode = isInSafeMode(dfs);
453     }
454     if (isInSafeMode) {
455       throw new IOException("File system is in safemode, it can't be written now");
456     }
457   }
458 
459   /**
460    * Verifies current version of file system
461    *
462    * @param fs filesystem object
463    * @param rootdir root hbase directory
464    * @return null if no version file exists, version string otherwise.
465    * @throws IOException e
466    * @throws org.apache.hadoop.hbase.exceptions.DeserializationException
467    */
468   public static String getVersion(FileSystem fs, Path rootdir)
469   throws IOException, DeserializationException {
470     Path versionFile = new Path(rootdir, HConstants.VERSION_FILE_NAME);
471     FileStatus[] status = null;
472     try {
473       // hadoop 2.0 throws FNFE if directory does not exist.
474       // hadoop 1.0 returns null if directory does not exist.
475       status = fs.listStatus(versionFile);
476     } catch (FileNotFoundException fnfe) {
477       return null;
478     }
479     if (status == null || status.length == 0) return null;
480     String version = null;
481     byte [] content = new byte [(int)status[0].getLen()];
482     FSDataInputStream s = fs.open(versionFile);
483     try {
484       IOUtils.readFully(s, content, 0, content.length);
485       if (ProtobufUtil.isPBMagicPrefix(content)) {
486         version = parseVersionFrom(content);
487       } else {
488         // Presume it is pre-pb format.
489         InputStream is = new ByteArrayInputStream(content);
490         DataInputStream dis = new DataInputStream(is);
491         try {
492           version = dis.readUTF();
493         } finally {
494           dis.close();
495         }
496       }
497     } catch (EOFException eof) {
498       LOG.warn("Version file was empty, odd, will try to set it.");
499     } finally {
500       s.close();
501     }
502     return version;
503   }
504 
505   /**
506    * Parse the content of the ${HBASE_ROOTDIR}/hbase.version file.
507    * @param bytes The byte content of the hbase.version file.
508    * @return The version found in the file as a String.
509    * @throws DeserializationException
510    */
511   static String parseVersionFrom(final byte [] bytes)
512   throws DeserializationException {
513     ProtobufUtil.expectPBMagicPrefix(bytes);
514     int pblen = ProtobufUtil.lengthOfPBMagic();
515     FSProtos.HBaseVersionFileContent.Builder builder =
516       FSProtos.HBaseVersionFileContent.newBuilder();
517     FSProtos.HBaseVersionFileContent fileContent;
518     try {
519       fileContent = builder.mergeFrom(bytes, pblen, bytes.length - pblen).build();
520       return fileContent.getVersion();
521     } catch (InvalidProtocolBufferException e) {
522       // Convert
523       throw new DeserializationException(e);
524     }
525   }
526 
527   /**
528    * Create the content to write into the ${HBASE_ROOTDIR}/hbase.version file.
529    * @param version Version to persist
530    * @return Serialized protobuf with <code>version</code> content and a bit of pb magic for a prefix.
531    */
532   static byte [] toVersionByteArray(final String version) {
533     FSProtos.HBaseVersionFileContent.Builder builder =
534       FSProtos.HBaseVersionFileContent.newBuilder();
535     return ProtobufUtil.prependPBMagic(builder.setVersion(version).build().toByteArray());
536   }
537 
538   /**
539    * Verifies current version of file system
540    *
541    * @param fs file system
542    * @param rootdir root directory of HBase installation
543    * @param message if true, issues a message on System.out
544    *
545    * @throws IOException e
546    * @throws DeserializationException
547    */
548   public static void checkVersion(FileSystem fs, Path rootdir, boolean message)
549   throws IOException, DeserializationException {
550     checkVersion(fs, rootdir, message, 0, HConstants.DEFAULT_VERSION_FILE_WRITE_ATTEMPTS);
551   }
552 
553   /**
554    * Verifies current version of file system
555    *
556    * @param fs file system
557    * @param rootdir root directory of HBase installation
558    * @param message if true, issues a message on System.out
559    * @param wait wait interval
560    * @param retries number of times to retry
561    *
562    * @throws IOException e
563    * @throws DeserializationException
564    */
565   public static void checkVersion(FileSystem fs, Path rootdir,
566       boolean message, int wait, int retries)
567   throws IOException, DeserializationException {
568     String version = getVersion(fs, rootdir);
569     if (version == null) {
570       if (!metaRegionExists(fs, rootdir)) {
571         // rootDir is empty (no version file and no root region)
572         // just create new version file (HBASE-1195)
573         setVersion(fs, rootdir, wait, retries);
574         return;
575       }
576     } else if (version.compareTo(HConstants.FILE_SYSTEM_VERSION) == 0) return;
577 
578     // version is deprecated; require migration
579     // Output on stdout so user sees it in terminal.
580     String msg = "HBase file layout needs to be upgraded."
581       + " You have version " + version
582       + " and I want version " + HConstants.FILE_SYSTEM_VERSION
583       + ". Consult http://hbase.apache.org/book.html for further information about upgrading HBase."
584       + " Is your hbase.rootdir valid? If so, you may need to run "
585       + "'hbase hbck -fixVersionFile'.";
586     if (message) {
587       System.out.println("WARNING! " + msg);
588     }
589     throw new FileSystemVersionException(msg);
590   }
591 
592   /**
593    * Sets version of file system
594    *
595    * @param fs filesystem object
596    * @param rootdir hbase root
597    * @throws IOException e
598    */
599   public static void setVersion(FileSystem fs, Path rootdir)
600   throws IOException {
601     setVersion(fs, rootdir, HConstants.FILE_SYSTEM_VERSION, 0,
602       HConstants.DEFAULT_VERSION_FILE_WRITE_ATTEMPTS);
603   }
604 
605   /**
606    * Sets version of file system
607    *
608    * @param fs filesystem object
609    * @param rootdir hbase root
610    * @param wait time to wait for retry
611    * @param retries number of times to retry before failing
612    * @throws IOException e
613    */
614   public static void setVersion(FileSystem fs, Path rootdir, int wait, int retries)
615   throws IOException {
616     setVersion(fs, rootdir, HConstants.FILE_SYSTEM_VERSION, wait, retries);
617   }
618 
619 
620   /**
621    * Sets version of file system
622    *
623    * @param fs filesystem object
624    * @param rootdir hbase root directory
625    * @param version version to set
626    * @param wait time to wait for retry
627    * @param retries number of times to retry before throwing an IOException
628    * @throws IOException e
629    */
630   public static void setVersion(FileSystem fs, Path rootdir, String version,
631       int wait, int retries) throws IOException {
632     Path versionFile = new Path(rootdir, HConstants.VERSION_FILE_NAME);
633     Path tempVersionFile = new Path(rootdir, HConstants.HBASE_TEMP_DIRECTORY + Path.SEPARATOR +
634       HConstants.VERSION_FILE_NAME);
635     while (true) {
636       try {
637         // Write the version to a temporary file
638         FSDataOutputStream s = fs.create(tempVersionFile);
639         try {
640           s.write(toVersionByteArray(version));
641           s.close();
642           s = null;
643           // Move the temp version file to its normal location. Returns false
644           // if the rename failed. Throw an IOE in that case.
645           if (!fs.rename(tempVersionFile, versionFile)) {
646             throw new IOException("Unable to move temp version file to " + versionFile);
647           }
648         } finally {
649           // Cleaning up the temporary if the rename failed would be trying
650           // too hard. We'll unconditionally create it again the next time
651           // through anyway, files are overwritten by default by create().
652 
653           // Attempt to close the stream on the way out if it is still open.
654           try {
655             if (s != null) s.close();
656           } catch (IOException ignore) { }
657         }
658         LOG.info("Created version file at " + rootdir.toString() + " with version=" + version);
659         return;
660       } catch (IOException e) {
661         if (retries > 0) {
662           LOG.debug("Unable to create version file at " + rootdir.toString() + ", retrying", e);
663           fs.delete(versionFile, false);
664           try {
665             if (wait > 0) {
666               Thread.sleep(wait);
667             }
668           } catch (InterruptedException ie) {
669             throw (InterruptedIOException)new InterruptedIOException().initCause(ie);
670           }
671           retries--;
672         } else {
673           throw e;
674         }
675       }
676     }
677   }
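      // Illustrative usage sketch (not part of the upstream class): the layout-version check
      // performed against the root directory; on a fresh install the hbase.version file is
      // written, with a wait between retries. The wait/retry values below are hypothetical.
      //
      //   Path rootDir = FSUtils.getRootDir(conf);
      //   FileSystem fs = rootDir.getFileSystem(conf);
      //   FSUtils.checkVersion(fs, rootDir, true);            // warn on System.out if stale
      //   // or, to (re)write the version file explicitly:
      //   FSUtils.setVersion(fs, rootDir, 10 * 1000, 3);      // wait 10s between up to 3 retries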
678 
679   /**
680    * Checks that a cluster ID file exists in the HBase root directory
681    * @param fs the root directory FileSystem
682    * @param rootdir the HBase root directory in HDFS
683    * @param wait how long to wait between retries
684    * @return <code>true</code> if the file exists, otherwise <code>false</code>
685    * @throws IOException if checking the FileSystem fails
686    */
687   public static boolean checkClusterIdExists(FileSystem fs, Path rootdir,
688       int wait) throws IOException {
689     while (true) {
690       try {
691         Path filePath = new Path(rootdir, HConstants.CLUSTER_ID_FILE_NAME);
692         return fs.exists(filePath);
693       } catch (IOException ioe) {
694         if (wait > 0) {
695           LOG.warn("Unable to check cluster ID file in " + rootdir.toString() +
696               ", retrying in "+wait+"msec: "+StringUtils.stringifyException(ioe));
697           try {
698             Thread.sleep(wait);
699           } catch (InterruptedException e) {
700             throw (InterruptedIOException)new InterruptedIOException().initCause(e);
701           }
702         } else {
703           throw ioe;
704         }
705       }
706     }
707   }
708 
709   /**
710    * Returns the value of the unique cluster ID stored for this HBase instance.
711    * @param fs the root directory FileSystem
712    * @param rootdir the path to the HBase root directory
713    * @return the unique cluster identifier
714    * @throws IOException if reading the cluster ID file fails
715    */
716   public static ClusterId getClusterId(FileSystem fs, Path rootdir)
717   throws IOException {
718     Path idPath = new Path(rootdir, HConstants.CLUSTER_ID_FILE_NAME);
719     ClusterId clusterId = null;
720     FileStatus status = fs.exists(idPath)? fs.getFileStatus(idPath):  null;
721     if (status != null) {
722       int len = Ints.checkedCast(status.getLen());
723       byte [] content = new byte[len];
724       FSDataInputStream in = fs.open(idPath);
725       try {
726         in.readFully(content);
727       } catch (EOFException eof) {
728         LOG.warn("Cluster ID file " + idPath.toString() + " was empty");
729       } finally{
730         in.close();
731       }
732       try {
733         clusterId = ClusterId.parseFrom(content);
734       } catch (DeserializationException e) {
735         throw new IOException("content=" + Bytes.toString(content), e);
736       }
737       // If not pb'd, make it so.
738       if (!ProtobufUtil.isPBMagicPrefix(content)) {
739         String cid = null;
740         in = fs.open(idPath);
741         try {
742           cid = in.readUTF();
743           clusterId = new ClusterId(cid);
744         } catch (EOFException eof) {
745           LOG.warn("Cluster ID file " + idPath.toString() + " was empty");
746         } finally {
747           in.close();
748         }
749         rewriteAsPb(fs, rootdir, idPath, clusterId);
750       }
751       return clusterId;
752     } else {
753       LOG.warn("Cluster ID file does not exist at " + idPath.toString());
754     }
755     return clusterId;
756   }
757 
758   /**
759    * @param cid the cluster ID to rewrite in protobuf format
760    * @throws IOException
761    */
762   private static void rewriteAsPb(final FileSystem fs, final Path rootdir, final Path p,
763       final ClusterId cid)
764   throws IOException {
765     // Rewrite the file as pb.  Move aside the old one first, write new
766     // then delete the moved-aside file.
767     Path movedAsideName = new Path(p + "." + System.currentTimeMillis());
768     if (!fs.rename(p, movedAsideName)) throw new IOException("Failed rename of " + p);
769     setClusterId(fs, rootdir, cid, 100);
770     if (!fs.delete(movedAsideName, false)) {
771       throw new IOException("Failed delete of " + movedAsideName);
772     }
773     LOG.debug("Rewrote the hbase.id file as pb");
774   }
775 
776   /**
777    * Writes a new unique identifier for this cluster to the "hbase.id" file
778    * in the HBase root directory
779    * @param fs the root directory FileSystem
780    * @param rootdir the path to the HBase root directory
781    * @param clusterId the unique identifier to store
782    * @param wait how long (in milliseconds) to wait between retries
783    * @throws IOException if writing to the FileSystem fails and no positive wait value was given
784    */
785   public static void setClusterId(FileSystem fs, Path rootdir, ClusterId clusterId,
786       int wait) throws IOException {
787     while (true) {
788       try {
789         Path idFile = new Path(rootdir, HConstants.CLUSTER_ID_FILE_NAME);
790         Path tempIdFile = new Path(rootdir, HConstants.HBASE_TEMP_DIRECTORY +
791           Path.SEPARATOR + HConstants.CLUSTER_ID_FILE_NAME);
792         // Write the id file to a temporary location
793         FSDataOutputStream s = fs.create(tempIdFile);
794         try {
795           s.write(clusterId.toByteArray());
796           s.close();
797           s = null;
798           // Move the temporary file to its normal location. Throw an IOE if
799           // the rename failed
800           if (!fs.rename(tempIdFile, idFile)) {
801             throw new IOException("Unable to move temp version file to " + idFile);
802           }
803         } finally {
804           // Attempt to close the stream if still open on the way out
805           try {
806             if (s != null) s.close();
807           } catch (IOException ignore) { }
808         }
809         if (LOG.isDebugEnabled()) {
810           LOG.debug("Created cluster ID file at " + idFile.toString() + " with ID: " + clusterId);
811         }
812         return;
813       } catch (IOException ioe) {
814         if (wait > 0) {
815           LOG.warn("Unable to create cluster ID file in " + rootdir.toString() +
816               ", retrying in " + wait + "msec: " + StringUtils.stringifyException(ioe));
817           try {
818             Thread.sleep(wait);
819           } catch (InterruptedException e) {
820             throw (InterruptedIOException)new InterruptedIOException().initCause(e);
821           }
822         } else {
823           throw ioe;
824         }
825       }
826     }
827   }
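      // Illustrative usage sketch (not part of the upstream class): reading the cluster ID,
      // and writing a fresh one when the hbase.id file does not exist yet. Assumes ClusterId's
      // no-argument constructor generates a new unique identifier.
      //
      //   Path rootDir = FSUtils.getRootDir(conf);
      //   FileSystem fs = rootDir.getFileSystem(conf);
      //   ClusterId id = FSUtils.getClusterId(fs, rootDir);
      //   if (id == null) {
      //     id = new ClusterId();
      //     FSUtils.setClusterId(fs, rootDir, id, 10 * 1000);   // retry every 10s on failure
      //   }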
828 
829   /**
830    * Verifies root directory path is a valid URI with a scheme
831    *
832    * @param root root directory path
833    * @return Passed <code>root</code> argument.
834    * @throws IOException if not a valid URI with a scheme
835    */
836   public static Path validateRootPath(Path root) throws IOException {
837     try {
838       URI rootURI = new URI(root.toString());
839       String scheme = rootURI.getScheme();
840       if (scheme == null) {
841         throw new IOException("Root directory does not have a scheme");
842       }
843       return root;
844     } catch (URISyntaxException e) {
845       IOException io = new IOException("Root directory path is not a valid " +
846         "URI -- check your " + HConstants.HBASE_DIR + " configuration");
847       io.initCause(e);
848       throw io;
849     }
850   }
851 
852   /**
853    * Checks for the presence of the root path (using the provided conf object) in the given path. If
854    * it exists, this method removes it and returns the String representation of the remaining relative path.
855    * @param path
856    * @param conf
857    * @return String representation of the remaining relative path
858    * @throws IOException
859    */
860   public static String removeRootPath(Path path, final Configuration conf) throws IOException {
861     Path root = FSUtils.getRootDir(conf);
862     String pathStr = path.toString();
863     // if the path does not start with the root path, return it as it is.
864     if (!pathStr.startsWith(root.toString())) return pathStr;
865     // otherwise strip the root path prefix.
866     return pathStr.substring(root.toString().length() + 1);// remove the "/" too.
867   }
868 
869   /**
870    * If DFS, check safe mode and if so, wait until we clear it.
871    * @param conf configuration
872    * @param wait Sleep between retries
873    * @throws IOException e
874    */
875   public static void waitOnSafeMode(final Configuration conf,
876     final long wait)
877   throws IOException {
878     FileSystem fs = FileSystem.get(conf);
879     if (!(fs instanceof DistributedFileSystem)) return;
880     DistributedFileSystem dfs = (DistributedFileSystem)fs;
881     // Make sure dfs is not in safe mode
882     while (isInSafeMode(dfs)) {
883       LOG.info("Waiting for dfs to exit safe mode...");
884       try {
885         Thread.sleep(wait);
886       } catch (InterruptedException e) {
887         throw (InterruptedIOException)new InterruptedIOException().initCause(e);
888       }
889     }
890   }
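      // Illustrative usage sketch (not part of the upstream class): blocking at startup
      // until HDFS leaves safe mode, then failing fast if it is entered again later.
      //
      //   FSUtils.waitOnSafeMode(conf, 10 * 1000);   // poll every 10 seconds
      //   FSUtils.checkDfsSafeMode(conf);            // throws IOException if still in safe mode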
891 
892   /**
893    * Return the 'path' component of a Path.  In Hadoop, Path is a URI.  This
894    * method returns the 'path' component of a Path's URI: e.g. If a Path is
895    * <code>hdfs://example.org:9000/hbase_trunk/TestTable/compaction.dir</code>,
896    * this method returns <code>/hbase_trunk/TestTable/compaction.dir</code>.
897    * This method is useful if you want to print out a Path without the qualifying
898    * Filesystem instance.
899    * @param p Filesystem Path whose 'path' component we are to return.
900    * @return the 'path' portion of the Path's URI
901    */
902   public static String getPath(Path p) {
903     return p.toUri().getPath();
904   }
905 
906   /**
907    * @param c configuration
908    * @return Path to hbase root directory: i.e. <code>hbase.rootdir</code> from
909    * configuration as a qualified Path.
910    * @throws IOException e
911    */
912   public static Path getRootDir(final Configuration c) throws IOException {
913     Path p = new Path(c.get(HConstants.HBASE_DIR));
914     FileSystem fs = p.getFileSystem(c);
915     return p.makeQualified(fs);
916   }
917 
918   public static void setRootDir(final Configuration c, final Path root) throws IOException {
919     c.set(HConstants.HBASE_DIR, root.toString());
920   }
921 
922   public static void setFsDefault(final Configuration c, final Path root) throws IOException {
923     c.set("fs.defaultFS", root.toString());    // for hadoop 0.21+
924   }
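      // Illustrative usage sketch (not part of the upstream class): pointing a Configuration
      // at a root directory (a pattern commonly used in tests) and reading it back as a
      // qualified Path. The hdfs URI below is hypothetical.
      //
      //   Path root = new Path("hdfs://namenode:8020/hbase");
      //   FSUtils.setRootDir(conf, root);
      //   FSUtils.setFsDefault(conf, root);
      //   Path qualified = FSUtils.getRootDir(conf);   // qualified against the root's filesystem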
925 
926   /**
927    * Checks if meta region exists
928    *
929    * @param fs file system
930    * @param rootdir root directory of HBase installation
931    * @return true if exists
932    * @throws IOException e
933    */
934   @SuppressWarnings("deprecation")
935   public static boolean metaRegionExists(FileSystem fs, Path rootdir)
936   throws IOException {
937     Path metaRegionDir =
938       HRegion.getRegionDir(rootdir, HRegionInfo.FIRST_META_REGIONINFO);
939     return fs.exists(metaRegionDir);
940   }
941 
942   /**
943    * Compute HDFS blocks distribution of a given file, or a portion of the file
944    * @param fs file system
945    * @param status file status of the file
946    * @param start start position of the portion
947    * @param length length of the portion
948    * @return The HDFS blocks distribution
949    */
950   static public HDFSBlocksDistribution computeHDFSBlocksDistribution(
951     final FileSystem fs, FileStatus status, long start, long length)
952     throws IOException {
953     HDFSBlocksDistribution blocksDistribution = new HDFSBlocksDistribution();
954     BlockLocation [] blockLocations =
955       fs.getFileBlockLocations(status, start, length);
956     for(BlockLocation bl : blockLocations) {
957       String [] hosts = bl.getHosts();
958       long len = bl.getLength();
959       blocksDistribution.addHostsAndBlockWeight(hosts, len);
960     }
961 
962     return blocksDistribution;
963   }
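      // Illustrative usage sketch (not part of the upstream class): computing how local a
      // file's blocks are to a given host. The store file path and host name are hypothetical;
      // assumes HDFSBlocksDistribution exposes getBlockLocalityIndex(String) as used elsewhere
      // in HBase.
      //
      //   FileStatus status = fs.getFileStatus(storeFilePath);
      //   HDFSBlocksDistribution dist =
      //       FSUtils.computeHDFSBlocksDistribution(fs, status, 0, status.getLen());
      //   float locality = dist.getBlockLocalityIndex("rs1.example.com");   // 0.0f .. 1.0f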
964 
965 
966 
967   /**
968    * Runs through the hbase rootdir and checks that all stores have only
969    * one file in them -- that is, they've been major compacted.  Looks
970    * at root and meta tables too.
971    * @param fs filesystem
972    * @param hbaseRootDir hbase root directory
973    * @return True if this hbase install is major compacted.
974    * @throws IOException e
975    */
976   public static boolean isMajorCompacted(final FileSystem fs,
977       final Path hbaseRootDir)
978   throws IOException {
979     List<Path> tableDirs = getTableDirs(fs, hbaseRootDir);
980     PathFilter regionFilter = new RegionDirFilter(fs);
981     PathFilter familyFilter = new FamilyDirFilter(fs);
982     for (Path d : tableDirs) {
983       FileStatus[] regionDirs = fs.listStatus(d, regionFilter);
984       for (FileStatus regionDir : regionDirs) {
985         Path dd = regionDir.getPath();
986         // Else it's a region name.  Now look in region for families.
987         FileStatus[] familyDirs = fs.listStatus(dd, familyFilter);
988         for (FileStatus familyDir : familyDirs) {
989           Path family = familyDir.getPath();
990           // Now in family make sure only one file.
991           FileStatus[] familyStatus = fs.listStatus(family);
992           if (familyStatus.length > 1) {
993             LOG.debug(family.toString() + " has " + familyStatus.length +
994                 " files.");
995             return false;
996           }
997         }
998       }
999     }
1000     return true;
1001   }
1002 
1003   // TODO move this method OUT of FSUtils. FSUtils should have no dependency on HMaster.
1004   /**
1005    * Returns the total overall fragmentation percentage. Includes hbase:meta and
1006    * -ROOT- as well.
1007    *
1008    * @param master  The master defining the HBase root and file system.
1009    * @return A map for each table and its percentage.
1010    * @throws IOException When scanning the directory fails.
1011    */
1012   public static int getTotalTableFragmentation(final HMaster master)
1013   throws IOException {
1014     Map<String, Integer> map = getTableFragmentation(master);
1015     return map != null && map.size() > 0 ? map.get("-TOTAL-") : -1;
1016   }
1017 
1018   /**
1019    * Runs through the HBase rootdir and checks how many stores for each table
1020    * have more than one file in them. Checks -ROOT- and hbase:meta too. The total
1021    * percentage across all tables is stored under the special key "-TOTAL-".
1022    *
1023    * @param master  The master defining the HBase root and file system.
1024    * @return A map for each table and its percentage.
1025    *
1026    * @throws IOException When scanning the directory fails.
1027    */
1028   public static Map<String, Integer> getTableFragmentation(
1029     final HMaster master)
1030   throws IOException {
1031     Path path = getRootDir(master.getConfiguration());
1032     // since HMaster.getFileSystem() is package private
1033     FileSystem fs = path.getFileSystem(master.getConfiguration());
1034     return getTableFragmentation(fs, path);
1035   }
1036 
1037   /**
1038    * Runs through the HBase rootdir and checks how many stores for each table
1039    * have more than one file in them. Checks -ROOT- and hbase:meta too. The total
1040    * percentage across all tables is stored under the special key "-TOTAL-".
1041    *
1042    * @param fs  The file system to use.
1043    * @param hbaseRootDir  The root directory to scan.
1044    * @return A map for each table and its percentage.
1045    * @throws IOException When scanning the directory fails.
1046    */
1047   public static Map<String, Integer> getTableFragmentation(
1048     final FileSystem fs, final Path hbaseRootDir)
1049   throws IOException {
1050     Map<String, Integer> frags = new HashMap<String, Integer>();
1051     int cfCountTotal = 0;
1052     int cfFragTotal = 0;
1053     PathFilter regionFilter = new RegionDirFilter(fs);
1054     PathFilter familyFilter = new FamilyDirFilter(fs);
1055     List<Path> tableDirs = getTableDirs(fs, hbaseRootDir);
1056     for (Path d : tableDirs) {
1057       int cfCount = 0;
1058       int cfFrag = 0;
1059       FileStatus[] regionDirs = fs.listStatus(d, regionFilter);
1060       for (FileStatus regionDir : regionDirs) {
1061         Path dd = regionDir.getPath();
1062         // else it's a region name, now look in region for families
1063         FileStatus[] familyDirs = fs.listStatus(dd, familyFilter);
1064         for (FileStatus familyDir : familyDirs) {
1065           cfCount++;
1066           cfCountTotal++;
1067           Path family = familyDir.getPath();
1068           // now in family make sure only one file
1069           FileStatus[] familyStatus = fs.listStatus(family);
1070           if (familyStatus.length > 1) {
1071             cfFrag++;
1072             cfFragTotal++;
1073           }
1074         }
1075       }
1076       // compute percentage per table and store in result list
1077       frags.put(FSUtils.getTableName(d).getNameAsString(),
1078         cfCount == 0? 0: Math.round((float) cfFrag / cfCount * 100));
1079     }
1080     // set overall percentage for all tables
1081     frags.put("-TOTAL-",
1082       cfCountTotal == 0? 0: Math.round((float) cfFragTotal / cfCountTotal * 100));
1083     return frags;
1084   }
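      // Illustrative usage sketch (not part of the upstream class): reading the per-table
      // fragmentation percentages plus the overall figure stored under "-TOTAL-".
      //
      //   Map<String, Integer> frag = FSUtils.getTableFragmentation(fs, FSUtils.getRootDir(conf));
      //   int overall = frag.get("-TOTAL-");
      //   for (Map.Entry<String, Integer> e : frag.entrySet()) {
      //     LOG.info(e.getKey() + " is " + e.getValue() + "% fragmented");
      //   }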
1085 
1086   /**
1087    * Returns the {@link org.apache.hadoop.fs.Path} object representing the table directory under
1088    * path rootdir
1089    *
1090    * @param rootdir qualified path of HBase root directory
1091    * @param tableName name of table
1092    * @return {@link org.apache.hadoop.fs.Path} for table
1093    */
1094   public static Path getTableDir(Path rootdir, final TableName tableName) {
1095     return new Path(getNamespaceDir(rootdir, tableName.getNamespaceAsString()),
1096         tableName.getQualifierAsString());
1097   }
1098 
1099   /**
1100    * Returns the {@link org.apache.hadoop.hbase.TableName} object for
1101    * the table corresponding to the given
1102    * table directory path
1103    *
1104    * @param tablePath path of table
1105    * @return {@link org.apache.hadoop.hbase.TableName} for the table
1106    */
1107   public static TableName getTableName(Path tablePath) {
1108     return TableName.valueOf(tablePath.getParent().getName(), tablePath.getName());
1109   }
1110 
1111   /**
1112    * Returns the {@link org.apache.hadoop.fs.Path} object representing
1113    * the namespace directory under path rootdir
1114    *
1115    * @param rootdir qualified path of HBase root directory
1116    * @param namespace namespace name
1117    * @return {@link org.apache.hadoop.fs.Path} for the namespace directory
1118    */
1119   public static Path getNamespaceDir(Path rootdir, final String namespace) {
1120     return new Path(rootdir, new Path(HConstants.BASE_NAMESPACE_DIR,
1121         new Path(namespace)));
1122   }
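      // Illustrative usage sketch (not part of the upstream class): how the layout helpers
      // compose. For a hypothetical table "t1" in namespace "ns1" under root /hbase, the
      // table directory is <root>/data/ns1/t1 (HConstants.BASE_NAMESPACE_DIR is "data" here).
      //
      //   Path rootDir = FSUtils.getRootDir(conf);
      //   Path nsDir = FSUtils.getNamespaceDir(rootDir, "ns1");                    // .../data/ns1
      //   Path tableDir = FSUtils.getTableDir(rootDir, TableName.valueOf("ns1", "t1"));
      //   TableName tn = FSUtils.getTableName(tableDir);                           // back to ns1:t1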
1123 
1124   /**
1125    * A {@link PathFilter} that returns only regular files.
1126    */
1127   static class FileFilter implements PathFilter {
1128     private final FileSystem fs;
1129 
1130     public FileFilter(final FileSystem fs) {
1131       this.fs = fs;
1132     }
1133 
1134     @Override
1135     public boolean accept(Path p) {
1136       try {
1137         return fs.isFile(p);
1138       } catch (IOException e) {
1139         LOG.debug("unable to verify if path=" + p + " is a regular file", e);
1140         return false;
1141       }
1142     }
1143   }
1144 
1145   /**
1146    * Directory filter that doesn't include any of the directories in the specified blacklist
1147    */
1148   public static class BlackListDirFilter implements PathFilter {
1149     private final FileSystem fs;
1150     private List<String> blacklist;
1151 
1152     /**
1153      * Create a filter on the given filesystem with the specified blacklist
1154      * @param fs filesystem to filter
1155      * @param directoryNameBlackList list of the names of the directories to filter. If
1156      *          <tt>null</tt>, all directories are returned
1157      */
1158     @SuppressWarnings("unchecked")
1159     public BlackListDirFilter(final FileSystem fs, final List<String> directoryNameBlackList) {
1160       this.fs = fs;
1161       blacklist =
1162         (List<String>) (directoryNameBlackList == null ? Collections.emptyList()
1163           : directoryNameBlackList);
1164     }
1165 
1166     @Override
1167     public boolean accept(Path p) {
1168       boolean isValid = false;
1169       try {
1170         if (isValidName(p.getName())) {
1171           isValid = fs.getFileStatus(p).isDirectory();
1172         } else {
1173           isValid = false;
1174         }
1175       } catch (IOException e) {
1176         LOG.warn("An error occurred while verifying if [" + p.toString()
1177             + "] is a valid directory. Returning 'not valid' and continuing.", e);
1178       }
1179       return isValid;
1180     }
1181 
1182     protected boolean isValidName(final String name) {
1183       return !blacklist.contains(name);
1184     }
1185   }
1186 
1187   /**
1188    * A {@link PathFilter} that only allows directories.
1189    */
1190   public static class DirFilter extends BlackListDirFilter {
1191 
1192     public DirFilter(FileSystem fs) {
1193       super(fs, null);
1194     }
1195   }
1196 
1197   /**
1198    * A {@link PathFilter} that returns user table directories. To get all directories use the
1199    * {@link BlackListDirFilter} with a <tt>null</tt> blacklist
1200    */
1201   public static class UserTableDirFilter extends BlackListDirFilter {
1202     public UserTableDirFilter(FileSystem fs) {
1203       super(fs, HConstants.HBASE_NON_TABLE_DIRS);
1204     }
1205 
1206     protected boolean isValidName(final String name) {
1207       if (!super.isValidName(name))
1208         return false;
1209 
1210       try {
1211         TableName.isLegalTableQualifierName(Bytes.toBytes(name));
1212       } catch (IllegalArgumentException e) {
1213         LOG.info("INVALID NAME " + name);
1214         return false;
1215       }
1216       return true;
1217     }
1218   }
1219 
1220   /**
1221    * Heuristic to determine whether it is safe or not to open a file for append.
1222    * Looks both for dfs.support.append and uses reflection to search
1223    * for SequenceFile.Writer.syncFs() or FSDataOutputStream.hflush()
1224    * @param conf
1225    * @return True if append support
1226    */
1227   public static boolean isAppendSupported(final Configuration conf) {
1228     boolean append = conf.getBoolean("dfs.support.append", false);
1229     if (append) {
1230       try {
1231         // TODO: The implementation that comes back when we do a createWriter
1232         // may not be using SequenceFile so the below is not a definitive test.
1233         // Will do for now (hdfs-200).
1234         SequenceFile.Writer.class.getMethod("syncFs", new Class<?> []{});
1235         append = true;
1236       } catch (SecurityException e) {
1237       } catch (NoSuchMethodException e) {
1238         append = false;
1239       }
1240     }
1241     if (!append) {
1242       // Look for the 0.21, 0.22, new-style append evidence.
1243       try {
1244         FSDataOutputStream.class.getMethod("hflush", new Class<?> []{});
1245         append = true;
1246       } catch (NoSuchMethodException e) {
1247         append = false;
1248       }
1249     }
1250     return append;
1251   }
1252 
1253   /**
1254    * @param conf
1255    * @return True if this filesystem's scheme is 'hdfs'.
1256    * @throws IOException
1257    */
1258   public static boolean isHDFS(final Configuration conf) throws IOException {
1259     FileSystem fs = FileSystem.get(conf);
1260     String scheme = fs.getUri().getScheme();
1261     return scheme.equalsIgnoreCase("hdfs");
1262   }
1263 
1264   /**
1265    * Recover file lease. Used when a file is suspected
1266    * to have been left open by another process.
1267    * @param fs FileSystem handle
1268    * @param p Path of file to recover lease
1269    * @param conf Configuration handle
1270    * @throws IOException
1271    */
1272   public abstract void recoverFileLease(final FileSystem fs, final Path p,
1273       Configuration conf, CancelableProgressable reporter) throws IOException;
1274 
1275   public static List<Path> getTableDirs(final FileSystem fs, final Path rootdir)
1276       throws IOException {
1277     List<Path> tableDirs = new LinkedList<Path>();
1278 
1279     for(FileStatus status :
1280         fs.globStatus(new Path(rootdir,
1281             new Path(HConstants.BASE_NAMESPACE_DIR, "*")))) {
1282       tableDirs.addAll(FSUtils.getLocalTableDirs(fs, status.getPath()));
1283     }
1284     return tableDirs;
1285   }
1286 
1287   /**
1288    * @param fs
1289    * @param rootdir
1290    * @return All the table directories under <code>rootdir</code>. Ignores non-table hbase folders such as
1291    * .logs, .oldlogs, and .corrupt.
1292    * @throws IOException
1293    */
1294   public static List<Path> getLocalTableDirs(final FileSystem fs, final Path rootdir)
1295       throws IOException {
1296     // presumes any directory under hbase.rootdir is a table
1297     FileStatus[] dirs = fs.listStatus(rootdir, new UserTableDirFilter(fs));
1298     List<Path> tabledirs = new ArrayList<Path>(dirs.length);
1299     for (FileStatus dir: dirs) {
1300       tabledirs.add(dir.getPath());
1301     }
1302     return tabledirs;
1303   }
1304 
1305   /**
1306    * Checks if the given path contains the 'recovered.edits' dir.
1307    * @param path path to check
1308    * @return True if the path is under a 'recovered.edits' directory
1309    */
1310   public static boolean isRecoveredEdits(Path path) {
1311     return path.toString().contains(HConstants.RECOVERED_EDITS_DIR);
1312   }
1313 
1314   /**
1315    * Filter for region directories: accepts only directories whose names look like region encodings.
1316    */
1317   public static class RegionDirFilter implements PathFilter {
1318     // This pattern will accept 0.90+ style hex region dirs and older numeric region dir names.
1319     final public static Pattern regionDirPattern = Pattern.compile("^[0-9a-f]*$");
1320     final FileSystem fs;
1321 
1322     public RegionDirFilter(FileSystem fs) {
1323       this.fs = fs;
1324     }
1325 
1326     @Override
1327     public boolean accept(Path rd) {
1328       if (!regionDirPattern.matcher(rd.getName()).matches()) {
1329         return false;
1330       }
1331 
1332       try {
1333         return fs.getFileStatus(rd).isDirectory();
1334       } catch (IOException ioe) {
1335         // Maybe the file was moved or the fs was disconnected.
1336         LOG.warn("Skipping file " + rd +" due to IOException", ioe);
1337         return false;
1338       }
1339     }
1340   }
1341 
1342   /**
1343    * Given a particular table dir, return all the regiondirs inside it, excluding files such as
1344    * .tableinfo
1345    * @param fs A file system for the Path
1346    * @param tableDir Path to a specific table directory <hbase.rootdir>/<tabledir>
1347    * @return List of paths to valid region directories in table dir.
1348    * @throws IOException
1349    */
1350   public static List<Path> getRegionDirs(final FileSystem fs, final Path tableDir) throws IOException {
1351     // assumes we are in a table dir.
1352     FileStatus[] rds = fs.listStatus(tableDir, new RegionDirFilter(fs));
1353     List<Path> regionDirs = new ArrayList<Path>(rds.length);
1354     for (FileStatus rdfs: rds) {
1355       Path rdPath = rdfs.getPath();
1356       regionDirs.add(rdPath);
1357     }
1358     return regionDirs;
1359   }
1360 
1361   /**
1362    * Filter for all dirs that are legal column family names.  This is generally used for colfam
1363    * dirs <hbase.rootdir>/<tabledir>/<regiondir>/<colfamdir>.
1364    */
1365   public static class FamilyDirFilter implements PathFilter {
1366     final FileSystem fs;
1367 
1368     public FamilyDirFilter(FileSystem fs) {
1369       this.fs = fs;
1370     }
1371 
1372     @Override
1373     public boolean accept(Path rd) {
1374       try {
1375         // throws IAE if invalid
1376         HColumnDescriptor.isLegalFamilyName(Bytes.toBytes(rd.getName()));
1377       } catch (IllegalArgumentException iae) {
1378         // path name is an invalid family name and thus is excluded.
1379         return false;
1380       }
1381 
1382       try {
1383         return fs.getFileStatus(rd).isDirectory();
1384       } catch (IOException ioe) {
1385         // Maybe the file was moved or the fs was disconnected.
1386         LOG.warn("Skipping file " + rd +" due to IOException", ioe);
1387         return false;
1388       }
1389     }
1390   }
1391 
1392   /**
1393    * Given a particular region dir, return all the familydirs inside it
1394    *
1395    * @param fs A file system for the Path
1396    * @param regionDir Path to a specific region directory
1397    * @return List of paths to valid family directories in region dir.
1398    * @throws IOException
1399    */
1400   public static List<Path> getFamilyDirs(final FileSystem fs, final Path regionDir) throws IOException {
1401     // assumes we are in a region dir.
1402     FileStatus[] fds = fs.listStatus(regionDir, new FamilyDirFilter(fs));
1403     List<Path> familyDirs = new ArrayList<Path>(fds.length);
1404     for (FileStatus fdfs: fds) {
1405       Path fdPath = fdfs.getPath();
1406       familyDirs.add(fdPath);
1407     }
1408     return familyDirs;
1409   }
1410 
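  /**
   * Given a particular column family dir, return all the reference file paths inside it
   * (files accepted by {@link ReferenceFileFilter}).
   *
   * @param fs A file system for the Path
   * @param familyDir Path to a specific column family directory
   * @return List of paths to reference files in the family dir.
   * @throws IOException
   */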
1411   public static List<Path> getReferenceFilePaths(final FileSystem fs, final Path familyDir) throws IOException {
1412     FileStatus[] fds = fs.listStatus(familyDir, new ReferenceFileFilter(fs));
1413     List<Path> referenceFiles = new ArrayList<Path>(fds.length);
1414     for (FileStatus fdfs: fds) {
1415       Path fdPath = fdfs.getPath();
1416       referenceFiles.add(fdPath);
1417     }
1418     return referenceFiles;
1419   }
1420 
1421   /**
1422    * Filter for HFiles that excludes reference files.
1423    */
1424   public static class HFileFilter implements PathFilter {
1425     final FileSystem fs;
1426 
1427     public HFileFilter(FileSystem fs) {
1428       this.fs = fs;
1429     }
1430 
1431     @Override
1432     public boolean accept(Path rd) {
1433       try {
1434         // only files
1435         return !fs.getFileStatus(rd).isDirectory() && StoreFileInfo.isHFile(rd);
1436       } catch (IOException ioe) {
1437         // Maybe the file was moved or the fs was disconnected.
1438         LOG.warn("Skipping file " + rd + " due to IOException", ioe);
1439         return false;
1440       }
1441     }
1442   }
1443 
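  /**
   * Illustrative usage sketch, not part of the original class: lists the plain hfile
   * paths (reference files excluded) under a column family directory by combining
   * fs.listStatus with the {@link HFileFilter} above.
   */
  private static List<Path> exampleListHFilePaths(final FileSystem fs, final Path familyDir)
      throws IOException {
    FileStatus[] fds = fs.listStatus(familyDir, new HFileFilter(fs));
    List<Path> hfiles = new ArrayList<Path>(fds.length);
    for (FileStatus fdfs : fds) {
      hfiles.add(fdfs.getPath());
    }
    return hfiles;
  }
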
1444   public static class ReferenceFileFilter implements PathFilter {
1445 
1446     private final FileSystem fs;
1447 
1448     public ReferenceFileFilter(FileSystem fs) {
1449       this.fs = fs;
1450     }
1451 
1452     @Override
1453     public boolean accept(Path rd) {
1454       try {
1455         // only files can be references.
1456         return !fs.getFileStatus(rd).isDirectory() && StoreFileInfo.isReference(rd);
1457       } catch (IOException ioe) {
1458         // Maybe the file was moved or the fs was disconnected.
1459         LOG.warn("Skipping file " + rd + " due to IOException", ioe);
1460         return false;
1461       }
1462     }
1463   }
1464 
1465 
1466   /**
1467    * @param conf the Configuration holding hbase.rootdir
1468    * @return the filesystem of the hbase rootdir
1469    * @throws IOException
1470    */
1471   public static FileSystem getCurrentFileSystem(Configuration conf)
1472   throws IOException {
1473     return getRootDir(conf).getFileSystem(conf);
1474   }
1475 
1476 
1477   /**
1478    * Runs through the HBase rootdir/tablename and creates a reverse lookup map for
1479    * table StoreFile names to the full Path.
1480    * <br>
1481    * Example...<br>
1482    * Key = 3944417774205889744  <br>
1483    * Value = hdfs://localhost:51169/user/userid/-ROOT-/70236052/info/3944417774205889744
1484    *
1485    * @param map map to add values.  If null, this method will create and populate one to return
1486    * @param fs  The file system to use.
1487    * @param hbaseRootDir  The root directory to scan.
1488    * @param tableName name of the table to scan.
1489    * @return Map keyed by StoreFile name with a value of the full Path.
1490    * @throws IOException When scanning the directory fails.
1491    */
1492   public static Map<String, Path> getTableStoreFilePathMap(Map<String, Path> map,
1493   final FileSystem fs, final Path hbaseRootDir, TableName tableName)
1494   throws IOException {
1495     if (map == null) {
1496       map = new HashMap<String, Path>();
1497     }
1498 
1499     // only include the directory paths to tables
1500     Path tableDir = FSUtils.getTableDir(hbaseRootDir, tableName);
1501     // Inside a table, there are compaction.dir directories to skip.  Otherwise, all else
1502     // should be regions.
1503     PathFilter familyFilter = new FamilyDirFilter(fs);
1504     FileStatus[] regionDirs = fs.listStatus(tableDir, new RegionDirFilter(fs));
1505     for (FileStatus regionDir : regionDirs) {
1506       Path dd = regionDir.getPath();
1507       // this is a region dir; now look inside it for families
1508       FileStatus[] familyDirs = fs.listStatus(dd, familyFilter);
1509       for (FileStatus familyDir : familyDirs) {
1510         Path family = familyDir.getPath();
1511         if (family.getName().equals(HConstants.RECOVERED_EDITS_DIR)) {
1512           continue;
1513         }
1514         // now in family, iterate over the StoreFiles and
1515         // put in map
1516         FileStatus[] familyStatus = fs.listStatus(family);
1517         for (FileStatus sfStatus : familyStatus) {
1518           Path sf = sfStatus.getPath();
1519           map.put(sf.getName(), sf);
1520         }
1521       }
1522     }
1523     return map;
1524   }
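
  /**
   * Illustrative usage sketch, not part of the original class: builds the store file
   * name to Path map for a single table.  The table name is whatever the caller
   * supplies; passing null as the map lets the method above create and return one.
   */
  private static Map<String, Path> exampleStoreFileMapForTable(final Configuration conf,
      final TableName tableName) throws IOException {
    FileSystem fs = getCurrentFileSystem(conf);   // filesystem of hbase.rootdir
    Path rootDir = getRootDir(conf);              // ${hbase.rootdir}
    return getTableStoreFilePathMap(null, fs, rootDir, tableName);
  }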
1525 
1526   public static int getRegionReferenceFileCount(final FileSystem fs, final Path p) {
1527     int result = 0;
1528     try {
1529       for (Path familyDir:getFamilyDirs(fs, p)){
1530         result += getReferenceFilePaths(fs, familyDir).size();
1531       }
1532     } catch (IOException e) {
1533       LOG.warn("Error counting reference files.", e);
1534     }
1535     return result;
1536   }
1537 
1538 
1539   /**
1540    * Runs through the HBase rootdir and creates a reverse lookup map for
1541    * table StoreFile names to the full Path.
1542    * <br>
1543    * Example...<br>
1544    * Key = 3944417774205889744  <br>
1545    * Value = hdfs://localhost:51169/user/userid/-ROOT-/70236052/info/3944417774205889744
1546    *
1547    * @param fs  The file system to use.
1548    * @param hbaseRootDir  The root directory to scan.
1549    * @return Map keyed by StoreFile name with a value of the full Path.
1550    * @throws IOException When scanning the directory fails.
1551    */
1552   public static Map<String, Path> getTableStoreFilePathMap(
1553     final FileSystem fs, final Path hbaseRootDir)
1554   throws IOException {
1555     Map<String, Path> map = new HashMap<String, Path>();
1556 
1557     // if this method looks similar to 'getTableFragmentation' that is because
1558     // it was borrowed from it.
1559 
1560     // only include the directory paths to tables
1561     for (Path tableDir : FSUtils.getTableDirs(fs, hbaseRootDir)) {
1562       getTableStoreFilePathMap(map, fs, hbaseRootDir,
1563           FSUtils.getTableName(tableDir));
1564     }
1565     return map;
1566   }
1567 
1568   /**
1569    * Calls fs.listStatus() and treats FileNotFoundException as non-fatal.
1570    * This accommodates differences between Hadoop versions: Hadoop 1 does not
1571    * throw a FileNotFoundException and instead returns an empty FileStatus[],
1572    * while Hadoop 2 throws FileNotFoundException.
1573    *
1574    * @param fs file system
1575    * @param dir directory
1576    * @param filter path filter
1577    * @return null if dir is empty or doesn't exist, otherwise FileStatus array
1578    */
1579   public static FileStatus [] listStatus(final FileSystem fs,
1580       final Path dir, final PathFilter filter) throws IOException {
1581     FileStatus [] status = null;
1582     try {
1583       status = filter == null ? fs.listStatus(dir) : fs.listStatus(dir, filter);
1584     } catch (FileNotFoundException fnfe) {
1585       // if directory doesn't exist, return null
1586       if (LOG.isTraceEnabled()) {
1587         LOG.trace(dir + " doesn't exist");
1588       }
1589     }
1590     if (status == null || status.length < 1) return null;
1591     return status;
1592   }
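
  /**
   * Illustrative usage sketch, not part of the original class: listStatus above returns
   * null (rather than throwing or returning an empty array) when the directory is missing
   * or empty, so callers are expected to null-check before iterating.
   */
  private static int exampleCountChildren(final FileSystem fs, final Path dir)
      throws IOException {
    FileStatus[] children = listStatus(fs, dir, null);
    return children == null ? 0 : children.length;
  }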
1593 
1594   /**
1595    * Calls fs.listStatus() and treats FileNotFoundException as non-fatal.
1596    * This accommodates differences between Hadoop versions.
1597    *
1598    * @param fs file system
1599    * @param dir directory
1600    * @return null if dir is empty or doesn't exist, otherwise FileStatus array
1601    */
1602   public static FileStatus[] listStatus(final FileSystem fs, final Path dir) throws IOException {
1603     return listStatus(fs, dir, null);
1604   }
1605 
1606   /**
1607    * Calls fs.delete() and returns the value returned by fs.delete().
1608    *
1609    * @param fs file system
1610    * @param path path to delete
1611    * @param recursive whether to delete recursively
1612    * @return the value returned by fs.delete()
1613    * @throws IOException
1614    */
1615   public static boolean delete(final FileSystem fs, final Path path, final boolean recursive)
1616       throws IOException {
1617     return fs.delete(path, recursive);
1618   }
1619 
1620   /**
1621    * Calls fs.exists(). Checks if the specified path exists.
1622    *
1623    * @param fs file system
1624    * @param path path to check
1625    * @return the value returned by fs.exists()
1626    * @throws IOException
1627    */
1628   public static boolean isExists(final FileSystem fs, final Path path) throws IOException {
1629     return fs.exists(path);
1630   }
1631 
1632   /**
1633    * Throw an exception if an action is not permitted by a user on a file.
1634    *
1635    * @param ugi
1636    *          the user
1637    * @param file
1638    *          the file
1639    * @param action
1640    *          the action
1641    */
1642   public static void checkAccess(UserGroupInformation ugi, FileStatus file,
1643       FsAction action) throws AccessDeniedException {
1644     if (ugi.getShortUserName().equals(file.getOwner())) {
1645       if (file.getPermission().getUserAction().implies(action)) {
1646         return;
1647       }
1648     } else if (contains(ugi.getGroupNames(), file.getGroup())) {
1649       if (file.getPermission().getGroupAction().implies(action)) {
1650         return;
1651       }
1652     } else if (file.getPermission().getOtherAction().implies(action)) {
1653       return;
1654     }
1655     throw new AccessDeniedException("Permission denied:" + " action=" + action
1656         + " path=" + file.getPath() + " user=" + ugi.getShortUserName());
1657   }
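
  /**
   * Illustrative usage sketch, not part of the original class: translates the
   * AccessDeniedException thrown by checkAccess above into a simple boolean answer
   * for "may this user read this file?".
   */
  private static boolean exampleCanRead(final UserGroupInformation ugi, final FileStatus file) {
    try {
      checkAccess(ugi, file, FsAction.READ);
      return true;
    } catch (AccessDeniedException e) {
      return false;
    }
  }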
1658 
1659   private static boolean contains(String[] groups, String user) {
1660     for (String group : groups) {
1661       if (group.equals(user)) {
1662         return true;
1663       }
1664     }
1665     return false;
1666   }
1667 
1668   /**
1669    * Log the current state of the filesystem from a certain root directory
1670    * @param fs filesystem to investigate
1671    * @param root root file/directory to start logging from
1672    * @param LOG log to output information
1673    * @throws IOException if an unexpected exception occurs
1674    */
1675   public static void logFileSystemState(final FileSystem fs, final Path root, Log LOG)
1676       throws IOException {
1677     LOG.debug("Current file system:");
1678     logFSTree(LOG, fs, root, "|-");
1679   }
1680 
1681   /**
1682    * Recursive helper to log the state of the FS
1683    *
1684    * @see #logFileSystemState(FileSystem, Path, Log)
1685    */
1686   private static void logFSTree(Log LOG, final FileSystem fs, final Path root, String prefix)
1687       throws IOException {
1688     FileStatus[] files = FSUtils.listStatus(fs, root, null);
1689     if (files == null) return;
1690 
1691     for (FileStatus file : files) {
1692       if (file.isDirectory()) {
1693         LOG.debug(prefix + file.getPath().getName() + "/");
1694         logFSTree(LOG, fs, file.getPath(), prefix + "---");
1695       } else {
1696         LOG.debug(prefix + file.getPath().getName());
1697       }
1698     }
1699   }
1700 
1701   public static boolean renameAndSetModifyTime(final FileSystem fs, final Path src, final Path dest)
1702       throws IOException {
1703     // set the modify time for TimeToLive Cleaner
1704     fs.setTimes(src, EnvironmentEdgeManager.currentTime(), -1);
1705     return fs.rename(src, dest);
1706   }
1707 
1708   /**
1709    * This function scans the root path of the file system to get the
1710    * degree of locality for each region on each of the servers having at least
1711    * one block of that region.
1712    * This is used by the tool {@link org.apache.hadoop.hbase.master.RegionPlacementMaintainer}
1713    *
1714    * @param conf
1715    *          the configuration to use
1716    * @return the mapping from region encoded name to a map of server names to
1717    *           locality fraction
1718    * @throws IOException
1719    *           in case of file system errors or interrupts
1720    */
1721   public static Map<String, Map<String, Float>> getRegionDegreeLocalityMappingFromFS(
1722       final Configuration conf) throws IOException {
1723     return getRegionDegreeLocalityMappingFromFS(
1724         conf, null,
1725         conf.getInt(THREAD_POOLSIZE, DEFAULT_THREAD_POOLSIZE));
1726 
1727   }
1728 
1729   /**
1730    * This function scans the root path of the file system to get the
1731    * degree of locality for each region on each of the servers having at least
1732    * one block of that region.
1733    *
1734    * @param conf
1735    *          the configuration to use
1736    * @param desiredTable
1737    *          the table you wish to scan locality for
1738    * @param threadPoolSize
1739    *          the thread pool size to use
1740    * @return the mapping from region encoded name to a map of server names to
1741    *           locality fraction
1742    * @throws IOException
1743    *           in case of file system errors or interrupts
1744    */
1745   public static Map<String, Map<String, Float>> getRegionDegreeLocalityMappingFromFS(
1746       final Configuration conf, final String desiredTable, int threadPoolSize)
1747       throws IOException {
1748     Map<String, Map<String, Float>> regionDegreeLocalityMapping =
1749         new ConcurrentHashMap<String, Map<String, Float>>();
1750     getRegionLocalityMappingFromFS(conf, desiredTable, threadPoolSize, null,
1751         regionDegreeLocalityMapping);
1752     return regionDegreeLocalityMapping;
1753   }
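
  /**
   * Illustrative usage sketch, not part of the original class: logs, for a caller-supplied
   * table, each region's locality fraction on every server holding at least one of its
   * blocks.  The thread pool size of 4 is an arbitrary example value.
   */
  private static void exampleLogRegionLocality(final Configuration conf, final String tableName)
      throws IOException {
    Map<String, Map<String, Float>> locality =
        getRegionDegreeLocalityMappingFromFS(conf, tableName, 4);
    for (Map.Entry<String, Map<String, Float>> region : locality.entrySet()) {
      LOG.info("Region " + region.getKey() + " locality by server: " + region.getValue());
    }
  }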
1754 
1755   /**
1756    * This function scans the root path of the file system to get either the
1757    * mapping between the region name and its best locality region server or the
1758    * degree of locality of each region on each of the servers having at least
1759    * one block of that region. The output map parameters are both optional.
1760    *
1761    * @param conf
1762    *          the configuration to use
1763    * @param desiredTable
1764    *          the table you wish to scan locality for
1765    * @param threadPoolSize
1766    *          the thread pool size to use
1767    * @param regionToBestLocalityRSMapping
1768    *          the map into which to put the best locality mapping or null
1769    * @param regionDegreeLocalityMapping
1770    *          the map into which to put the locality degree mapping or null,
1771    *          must be a thread-safe implementation
1772    * @throws IOException
1773    *           in case of file system errors or interrupts
1774    */
1775   private static void getRegionLocalityMappingFromFS(
1776       final Configuration conf, final String desiredTable,
1777       int threadPoolSize,
1778       Map<String, String> regionToBestLocalityRSMapping,
1779       Map<String, Map<String, Float>> regionDegreeLocalityMapping)
1780       throws IOException {
1781     FileSystem fs =  FileSystem.get(conf);
1782     Path rootPath = FSUtils.getRootDir(conf);
1783     long startTime = EnvironmentEdgeManager.currentTime();
1784     Path queryPath;
1785     // The table files are in ${hbase.rootdir}/data/<namespace>/<table>/*
1786     if (null == desiredTable) {
1787       queryPath = new Path(new Path(rootPath, HConstants.BASE_NAMESPACE_DIR).toString() + "/*/*/*/");
1788     } else {
1789       queryPath = new Path(FSUtils.getTableDir(rootPath, TableName.valueOf(desiredTable)).toString() + "/*/");
1790     }
1791 
1792     // reject all paths that are not appropriate
1793     PathFilter pathFilter = new PathFilter() {
1794       @Override
1795       public boolean accept(Path path) {
1796         // the last path component should be a region name, but the glob may match noise
1797         if (null == path) {
1798           return false;
1799         }
1800 
1801         // no parent?
1802         Path parent = path.getParent();
1803         if (null == parent) {
1804           return false;
1805         }
1806 
1807         String regionName = path.getName();
1808         if (null == regionName) {
1809           return false;
1810         }
1811 
1812         if (!regionName.toLowerCase().matches("[0-9a-f]+")) {
1813           return false;
1814         }
1815         return true;
1816       }
1817     };
1818 
1819     FileStatus[] statusList = fs.globStatus(queryPath, pathFilter);
1820 
1821     if (null == statusList) {
1822       return;
1823     } else {
1824       LOG.debug("Query Path: " + queryPath + "; number of files: " +
1825           statusList.length);
1826     }
1827 
1828     // lower the number of threads in case we have very few expected regions
1829     threadPoolSize = Math.min(threadPoolSize, statusList.length);
1830 
1831     // run in multiple threads
1832     ThreadPoolExecutor tpe = new ThreadPoolExecutor(threadPoolSize,
1833         threadPoolSize, 60, TimeUnit.SECONDS,
1834         new ArrayBlockingQueue<Runnable>(statusList.length));
1835     try {
1836       // ignore all file status items that are not of interest
1837       for (FileStatus regionStatus : statusList) {
1838         if (null == regionStatus) {
1839           continue;
1840         }
1841 
1842         if (!regionStatus.isDirectory()) {
1843           continue;
1844         }
1845 
1846         Path regionPath = regionStatus.getPath();
1847         if (null == regionPath) {
1848           continue;
1849         }
1850 
1851         tpe.execute(new FSRegionScanner(fs, regionPath,
1852             regionToBestLocalityRSMapping, regionDegreeLocalityMapping));
1853       }
1854     } finally {
1855       tpe.shutdown();
1856       int threadWakeFrequency = conf.getInt(HConstants.THREAD_WAKE_FREQUENCY,
1857           60 * 1000);
1858       try {
1859         // here we wait until TPE terminates, which is either naturally or by
1860         // exceptions in the execution of the threads
1861         while (!tpe.awaitTermination(threadWakeFrequency,
1862             TimeUnit.MILLISECONDS)) {
1863           // printing out rough estimate, so as to not introduce
1864           // AtomicInteger
1865           LOG.info("Locality checking is underway: { Scanned Regions : "
1866               + tpe.getCompletedTaskCount() + "/"
1867               + tpe.getTaskCount() + " }");
1868         }
1869       } catch (InterruptedException e) {
1870         throw (InterruptedIOException)new InterruptedIOException().initCause(e);
1871       }
1872     }
1873 
1874     long overhead = EnvironmentEdgeManager.currentTime() - startTime;
1875     String overheadMsg = "Scanning DFS for locality info took " + overhead + " ms";
1876 
1877     LOG.info(overheadMsg);
1878   }
1879 
1880   /**
1881    * Do our short circuit read setup.
1882    * Checks buffer size to use and whether to do checksumming in hbase or hdfs.
1883    * @param conf
1884    */
1885   public static void setupShortCircuitRead(final Configuration conf) {
1886     // Check that the user has not set the "dfs.client.read.shortcircuit.skip.checksum" property.
1887     boolean shortCircuitSkipChecksum =
1888       conf.getBoolean("dfs.client.read.shortcircuit.skip.checksum", false);
1889     boolean useHBaseChecksum = conf.getBoolean(HConstants.HBASE_CHECKSUM_VERIFICATION, true);
1890     if (shortCircuitSkipChecksum) {
1891       LOG.warn("Configuration \"dfs.client.read.shortcircuit.skip.checksum\" should not " +
1892         "be set to true." + (useHBaseChecksum ? " HBase checksum doesn't require " +
1893         "it, see https://issues.apache.org/jira/browse/HBASE-6868." : ""));
1894       assert !shortCircuitSkipChecksum; //this will fail if assertions are on
1895     }
1896     checkShortCircuitReadBufferSize(conf);
1897   }
1898 
1899   /**
1900    * Check if short circuit read buffer size is set and if not, set it to hbase value.
1901    * @param conf
1902    */
1903   public static void checkShortCircuitReadBufferSize(final Configuration conf) {
1904     final int defaultSize = HConstants.DEFAULT_BLOCKSIZE * 2;
1905     final int notSet = -1;
1906     // DFSConfigKeys.DFS_CLIENT_READ_SHORTCIRCUIT_BUFFER_SIZE_KEY is only defined in h2
1907     final String dfsKey = "dfs.client.read.shortcircuit.buffer.size";
1908     int size = conf.getInt(dfsKey, notSet);
1909     // If a size is set, return -- we will use it.
1910     if (size != notSet) return;
1911     // But the short circuit buffer size is normally not set.  Fall back to the size HBase wants.
1912     int hbaseSize = conf.getInt("hbase." + dfsKey, defaultSize);
1913     conf.setIfUnset(dfsKey, Integer.toString(hbaseSize));
1914   }
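
  /**
   * Illustrative usage sketch, not part of the original class: the buffer size that
   * checkShortCircuitReadBufferSize above installs can be steered through the
   * "hbase."-prefixed key; the 128 KB value below is an example only.
   */
  private static void exampleConfigureShortCircuitBuffer(final Configuration conf) {
    conf.setInt("hbase.dfs.client.read.shortcircuit.buffer.size", 128 * 1024);
    // Copies the hbase value onto dfs.client.read.shortcircuit.buffer.size if that key is unset.
    checkShortCircuitReadBufferSize(conf);
  }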
1915 
1916   /**
1917    * @param c the Configuration to use
1918    * @return The DFSClient DFSHedgedReadMetrics instance, or null if it cannot be found or the filesystem is not HDFS.
1919    * @throws IOException
1920    */
1921   public static DFSHedgedReadMetrics getDFSHedgedReadMetrics(final Configuration c)
1922       throws IOException {
1923     if (!isHDFS(c)) return null;
1924     // getHedgedReadMetrics is package private. Get the DFSClient instance that is internal
1925     // to the DFS FS instance and make the method getHedgedReadMetrics accessible, then invoke it
1926     // to get the singleton instance of DFSHedgedReadMetrics shared by DFSClients.
1927     final String name = "getHedgedReadMetrics";
1928     DFSClient dfsclient = ((DistributedFileSystem)FileSystem.get(c)).getClient();
1929     Method m;
1930     try {
1931       m = dfsclient.getClass().getDeclaredMethod(name);
1932     } catch (NoSuchMethodException e) {
1933       LOG.warn("Failed to find method " + name + " in dfsclient; no hedged read metrics: " +
1934           e.getMessage());
1935       return null;
1936     } catch (SecurityException e) {
1937       LOG.warn("Failed to find method " + name + " in dfsclient; no hedged read metrics: " +
1938           e.getMessage());
1939       return null;
1940     }
1941     m.setAccessible(true);
1942     try {
1943       return (DFSHedgedReadMetrics)m.invoke(dfsclient);
1944     } catch (IllegalAccessException e) {
1945       LOG.warn("Failed to invoke method " + name + " on dfsclient; no hedged read metrics: " +
1946           e.getMessage());
1947       return null;
1948     } catch (IllegalArgumentException e) {
1949       LOG.warn("Failed to invoke method " + name + " on dfsclient; no hedged read metrics: " +
1950           e.getMessage());
1951       return null;
1952     } catch (InvocationTargetException e) {
1953       LOG.warn("Failed to invoke method " + name + " on dfsclient; no hedged read metrics: " +
1954           e.getMessage());
1955       return null;
1956     }
1957   }
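
  /**
   * Illustrative usage sketch, not part of the original class: logs the hedged read
   * counters when the underlying filesystem is HDFS and the metrics object could be
   * obtained reflectively by getDFSHedgedReadMetrics above.
   */
  private static void exampleLogHedgedReadMetrics(final Configuration conf) throws IOException {
    DFSHedgedReadMetrics metrics = getDFSHedgedReadMetrics(conf);
    if (metrics == null) {
      LOG.info("No hedged read metrics available");
      return;
    }
    LOG.info("Hedged read ops=" + metrics.getHedgedReadOps()
        + ", wins=" + metrics.getHedgedReadWins());
  }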
1958 }