
1   /**
2    *
3    * Licensed to the Apache Software Foundation (ASF) under one
4    * or more contributor license agreements.  See the NOTICE file
5    * distributed with this work for additional information
6    * regarding copyright ownership.  The ASF licenses this file
7    * to you under the Apache License, Version 2.0 (the
8    * "License"); you may not use this file except in compliance
9    * with the License.  You may obtain a copy of the License at
10   *
11   *     http://www.apache.org/licenses/LICENSE-2.0
12   *
13   * Unless required by applicable law or agreed to in writing, software
14   * distributed under the License is distributed on an "AS IS" BASIS,
15   * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
16   * See the License for the specific language governing permissions and
17   * limitations under the License.
18   */
19  package org.apache.hadoop.hbase.util;
20  
21  import java.io.ByteArrayInputStream;
22  import java.io.DataInputStream;
23  import java.io.EOFException;
24  import java.io.FileNotFoundException;
25  import java.io.IOException;
26  import java.io.InputStream;
27  import java.io.InterruptedIOException;
28  import java.lang.reflect.InvocationTargetException;
29  import java.lang.reflect.Method;
30  import java.net.InetSocketAddress;
31  import java.net.URI;
32  import java.net.URISyntaxException;
33  import java.util.ArrayList;
34  import java.util.Collections;
35  import java.util.HashMap;
36  import java.util.LinkedList;
37  import java.util.List;
38  import java.util.Map;
39  import java.util.concurrent.ArrayBlockingQueue;
40  import java.util.concurrent.ConcurrentHashMap;
41  import java.util.concurrent.ThreadPoolExecutor;
42  import java.util.concurrent.TimeUnit;
43  import java.util.regex.Pattern;
44  
45  import org.apache.commons.logging.Log;
46  import org.apache.commons.logging.LogFactory;
47  import org.apache.hadoop.classification.InterfaceAudience;
48  import org.apache.hadoop.conf.Configuration;
49  import org.apache.hadoop.fs.BlockLocation;
50  import org.apache.hadoop.fs.FSDataInputStream;
51  import org.apache.hadoop.fs.FSDataOutputStream;
52  import org.apache.hadoop.fs.FileStatus;
53  import org.apache.hadoop.fs.FileSystem;
54  import org.apache.hadoop.fs.Path;
55  import org.apache.hadoop.fs.PathFilter;
56  import org.apache.hadoop.fs.permission.FsAction;
57  import org.apache.hadoop.fs.permission.FsPermission;
58  import org.apache.hadoop.hbase.ClusterId;
59  import org.apache.hadoop.hbase.HColumnDescriptor;
60  import org.apache.hadoop.hbase.HConstants;
61  import org.apache.hadoop.hbase.HDFSBlocksDistribution;
62  import org.apache.hadoop.hbase.HRegionInfo;
63  import org.apache.hadoop.hbase.TableName;
64  import org.apache.hadoop.hbase.exceptions.DeserializationException;
65  import org.apache.hadoop.hbase.fs.HFileSystem;
66  import org.apache.hadoop.hbase.master.HMaster;
67  import org.apache.hadoop.hbase.master.RegionPlacementMaintainer;
68  import org.apache.hadoop.hbase.security.AccessDeniedException;
69  import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
70  import org.apache.hadoop.hbase.protobuf.generated.FSProtos;
71  import org.apache.hadoop.hbase.regionserver.HRegion;
72  import org.apache.hadoop.hdfs.DistributedFileSystem;
73  import org.apache.hadoop.hdfs.protocol.FSConstants;
74  import org.apache.hadoop.io.IOUtils;
75  import org.apache.hadoop.io.SequenceFile;
76  import org.apache.hadoop.ipc.RemoteException;
77  import org.apache.hadoop.security.UserGroupInformation;
78  import org.apache.hadoop.util.Progressable;
79  import org.apache.hadoop.util.ReflectionUtils;
80  import org.apache.hadoop.util.StringUtils;
81  
82  import com.google.common.primitives.Ints;
83  import com.google.protobuf.InvalidProtocolBufferException;
84  
85  /**
86   * Utility methods for interacting with the underlying file system.
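     *
     * <p>Typical usage sketch (illustrative only; assumes an HBase client {@link Configuration}
     * is available on the classpath):
     * <pre>{@code
     * Configuration conf = HBaseConfiguration.create();
     * Path rootDir = FSUtils.getRootDir(conf);
     * FileSystem fs = rootDir.getFileSystem(conf);
     * FSUtils.checkVersion(fs, rootDir, true);
     * }</pre>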
87   */
88  @InterfaceAudience.Private
89  public abstract class FSUtils {
90    private static final Log LOG = LogFactory.getLog(FSUtils.class);
91  
92    /** Full access permissions (starting point for a umask) */
93    public static final String FULL_RWX_PERMISSIONS = "777";
94    private static final String THREAD_POOLSIZE = "hbase.client.localityCheck.threadPoolSize";
95    private static final int DEFAULT_THREAD_POOLSIZE = 2;
96  
97    /** Set to true on Windows platforms */
98    public static final boolean WINDOWS = System.getProperty("os.name").startsWith("Windows");
99  
100   protected FSUtils() {
101     super();
102   }
103 
104   /**
105    * Compares path components only. Does not consider the scheme; i.e. even if the schemes differ,
106    * if <code>path</code> starts with <code>rootPath</code>, the function returns true.
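       * For example (illustrative), <code>isStartingWithPath(new Path("hdfs://nn:8020/hbase"), "/hbase/data")</code>
       * returns true.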
107    * @param rootPath
108    * @param path
109    * @return True if <code>path</code> starts with <code>rootPath</code>
110    */
111   public static boolean isStartingWithPath(final Path rootPath, final String path) {
112     String uriRootPath = rootPath.toUri().getPath();
113     String tailUriPath = (new Path(path)).toUri().getPath();
114     return tailUriPath.startsWith(uriRootPath);
115   }
116 
117   /**
118    * Compares the path component of the Path URI; e.g. given hdfs://a/b/c and /a/b/c, it compares
119    * the '/a/b/c' part. Does not consider the scheme; i.e. if the schemes differ but the path or
120    * subpath matches, the two are considered equal.
121    * @param pathToSearch Path we will be trying to match.
122    * @param pathTail
123    * @return True if <code>pathTail</code> is tail on the path of <code>pathToSearch</code>
124    */
125   public static boolean isMatchingTail(final Path pathToSearch, String pathTail) {
126     return isMatchingTail(pathToSearch, new Path(pathTail));
127   }
128 
129   /**
130    * Compares the path component of the Path URI; e.g. given hdfs://a/b/c and /a/b/c, it compares
131    * the '/a/b/c' part. If you passed in 'hdfs://a/b/c' and 'b/c', it would return true. Does not
132    * consider the scheme; i.e. if the schemes differ but the path or subpath matches, the two are considered equal.
133    * @param pathToSearch Path we will be trying to match.
134    * @param pathTail
135    * @return True if <code>pathTail</code> is tail on the path of <code>pathToSearch</code>
136    */
137   public static boolean isMatchingTail(final Path pathToSearch, final Path pathTail) {
138     if (pathToSearch.depth() != pathTail.depth()) return false;
139     Path tailPath = pathTail;
140     String tailName;
141     Path toSearch = pathToSearch;
142     String toSearchName;
143     boolean result = false;
144     do {
145       tailName = tailPath.getName();
146       if (tailName == null || tailName.length() <= 0) {
147         result = true;
148         break;
149       }
150       toSearchName = toSearch.getName();
151       if (toSearchName == null || toSearchName.length() <= 0) break;
152       // Move up a parent on each path for next go around.  Path doesn't let us go off the end.
153       tailPath = tailPath.getParent();
154       toSearch = toSearch.getParent();
155     } while(tailName.equals(toSearchName));
156     return result;
157   }
158 
159   public static FSUtils getInstance(FileSystem fs, Configuration conf) {
160     String scheme = fs.getUri().getScheme();
161     if (scheme == null) {
162       LOG.warn("Could not find scheme for uri " +
163           fs.getUri() + ", default to hdfs");
164       scheme = "hdfs";
165     }
166     Class<?> fsUtilsClass = conf.getClass("hbase.fsutil." +
167         scheme + ".impl", FSHDFSUtils.class); // Default to HDFS impl
168     FSUtils fsUtils = (FSUtils)ReflectionUtils.newInstance(fsUtilsClass, conf);
169     return fsUtils;
170   }
171 
172   /**
173    * Delete if exists.
174    * @param fs filesystem object
175    * @param dir directory to delete
176    * @return True if deleted <code>dir</code>
177    * @throws IOException e
178    */
179   public static boolean deleteDirectory(final FileSystem fs, final Path dir)
180   throws IOException {
181     return fs.exists(dir) && fs.delete(dir, true);
182   }
183 
184   /**
185    * Return the number of bytes that large input files should optimally
186    * be split into to minimize i/o time.
187    *
188    * Uses reflection to search for getDefaultBlockSize(Path f); if the method
189    * doesn't exist, falls back to using getDefaultBlockSize().
190    *
191    * @param fs filesystem object
192    * @return the default block size for the path's filesystem
193    * @throws IOException e
194    */
195   public static long getDefaultBlockSize(final FileSystem fs, final Path path) throws IOException {
196     Method m = null;
197     Class<? extends FileSystem> cls = fs.getClass();
198     try {
199       m = cls.getMethod("getDefaultBlockSize", new Class<?>[] { Path.class });
200     } catch (NoSuchMethodException e) {
201       LOG.info("FileSystem doesn't support getDefaultBlockSize");
202     } catch (SecurityException e) {
203       LOG.info("Doesn't have access to getDefaultBlockSize on FileSystems", e);
204       m = null; // could happen on setAccessible()
205     }
206     if (m == null) {
207       return fs.getDefaultBlockSize(path);
208     } else {
209       try {
210         Object ret = m.invoke(fs, path);
211         return ((Long)ret).longValue();
212       } catch (Exception e) {
213         throw new IOException(e);
214       }
215     }
216   }
217 
218   /**
219    * Get the default replication.
220    *
221    * Uses reflection to search for getDefaultReplication(Path f); if the method
222    * doesn't exist, falls back to using getDefaultReplication().
223    *
224    * @param fs filesystem object
225    * @param f path of file
226    * @return default replication for the path's filesystem
227    * @throws IOException e
228    */
229   public static short getDefaultReplication(final FileSystem fs, final Path path) throws IOException {
230     Method m = null;
231     Class<? extends FileSystem> cls = fs.getClass();
232     try {
233       m = cls.getMethod("getDefaultReplication", new Class<?>[] { Path.class });
234     } catch (NoSuchMethodException e) {
235       LOG.info("FileSystem doesn't support getDefaultReplication");
236     } catch (SecurityException e) {
237       LOG.info("Doesn't have access to getDefaultReplication on FileSystems", e);
238       m = null; // could happen on setAccessible()
239     }
240     if (m == null) {
241       return fs.getDefaultReplication(path);
242     } else {
243       try {
244         Object ret = m.invoke(fs, path);
245         return ((Number)ret).shortValue();
246       } catch (Exception e) {
247         throw new IOException(e);
248       }
249     }
250   }
251 
252   /**
253    * Returns the default buffer size to use during writes.
254    *
255    * The size of the buffer should probably be a multiple of hardware
256    * page size (4096 on Intel x86), and it determines how much data is
257    * buffered during read and write operations.
258    *
259    * @param fs filesystem object
260    * @return default buffer size to use during writes
261    */
262   public static int getDefaultBufferSize(final FileSystem fs) {
263     return fs.getConf().getInt("io.file.buffer.size", 4096);
264   }
265 
266   /**
267    * Create the specified file on the filesystem. By default, this will:
268    * <ol>
269    * <li>overwrite the file if it exists</li>
270    * <li>apply the umask in the configuration (if it is enabled)</li>
271    * <li>use the fs configured buffer size (or 4096 if not set)</li>
272    * <li>use the default replication</li>
273    * <li>use the default block size</li>
274    * <li>not track progress</li>
275    * </ol>
276    *
277    * @param fs {@link FileSystem} on which to write the file
278    * @param path {@link Path} to the file to write
279    * @param perm permissions
280    * @param favoredNodes
281    * @return output stream to the created file
282    * @throws IOException if the file cannot be created
283    */
284   public static FSDataOutputStream create(FileSystem fs, Path path,
285       FsPermission perm, InetSocketAddress[] favoredNodes) throws IOException {
286     if (fs instanceof HFileSystem) {
287       FileSystem backingFs = ((HFileSystem)fs).getBackingFs();
288       if (backingFs instanceof DistributedFileSystem) {
289         // Try to use the favoredNodes version via reflection to allow backwards-
290         // compatibility.
291         try {
292           return (FSDataOutputStream) (DistributedFileSystem.class
293               .getDeclaredMethod("create", Path.class, FsPermission.class,
294                   boolean.class, int.class, short.class, long.class,
295                   Progressable.class, InetSocketAddress[].class)
296                   .invoke(backingFs, path, perm, true,
297                       getDefaultBufferSize(backingFs),
298                       getDefaultReplication(backingFs, path),
299                       getDefaultBlockSize(backingFs, path),
300                       null, favoredNodes));
301         } catch (InvocationTargetException ite) {
302           // Function was properly called, but threw its own exception.
303           throw new IOException(ite.getCause());
304         } catch (NoSuchMethodException e) {
305           LOG.debug("DFS Client does not support most favored nodes create; using default create");
306           if (LOG.isTraceEnabled()) LOG.trace("Ignoring; use default create", e);
307         } catch (IllegalArgumentException e) {
308           LOG.debug("Ignoring (most likely Reflection related exception) " + e);
309         } catch (SecurityException e) {
310           LOG.debug("Ignoring (most likely Reflection related exception) " + e);
311         } catch (IllegalAccessException e) {
312           LOG.debug("Ignoring (most likely Reflection related exception) " + e);
313         }
314       }
315     }
316     return create(fs, path, perm, true);
317   }
318 
319   /**
320    * Create the specified file on the filesystem. By default, this will:
321    * <ol>
322    * <li>apply the umask in the configuration (if it is enabled)</li>
323    * <li>use the fs configured buffer size (or 4096 if not set)</li>
324    * <li>use the default replication</li>
325    * <li>use the default block size</li>
326    * <li>not track progress</li>
327    * </ol>
328    *
329    * @param fs {@link FileSystem} on which to write the file
330    * @param path {@link Path} to the file to write
331    * @param perm
332    * @param overwrite Whether or not the created file should be overwritten.
333    * @return output stream to the created file
334    * @throws IOException if the file cannot be created
335    */
336   public static FSDataOutputStream create(FileSystem fs, Path path,
337       FsPermission perm, boolean overwrite) throws IOException {
338     if (LOG.isTraceEnabled()) {
339       LOG.trace("Creating file=" + path + " with permission=" + perm + ", overwrite=" + overwrite);
340     }
341     return fs.create(path, perm, overwrite, getDefaultBufferSize(fs),
342         getDefaultReplication(fs, path), getDefaultBlockSize(fs, path), null);
343   }
344 
345   /**
346    * Get the file permissions specified in the configuration, if they are
347    * enabled.
348    *
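       * <p>Minimal usage sketch (the umask property key shown is supplied by the caller and is
       * illustrative only):
       * <pre>{@code
       * FsPermission perm = FSUtils.getFilePermissions(fs, conf, "hbase.data.umask");
       * FSDataOutputStream out = FSUtils.create(fs, path, perm, true);
       * }</pre>
       *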
349    * @param fs filesystem that the file will be created on.
350    * @param conf configuration to read for determining if permissions are
351    *          enabled and which to use
352    * @param permssionConfKey property key in the configuration to use when
353    *          finding the permission
354    * @return the permission to use when creating a new file on the fs. If
355    *         special permissions are not specified in the configuration, then
356    *         the default permissions on the fs will be returned.
357    */
358   public static FsPermission getFilePermissions(final FileSystem fs,
359       final Configuration conf, final String permssionConfKey) {
360     boolean enablePermissions = conf.getBoolean(
361         HConstants.ENABLE_DATA_FILE_UMASK, false);
362 
363     if (enablePermissions) {
364       try {
365         FsPermission perm = new FsPermission(FULL_RWX_PERMISSIONS);
366         // make sure that we have a mask, if not, go default.
367         String mask = conf.get(permssionConfKey);
368         if (mask == null)
369           return FsPermission.getFileDefault();
370         // apply the umask
371         FsPermission umask = new FsPermission(mask);
372         return perm.applyUMask(umask);
373       } catch (IllegalArgumentException e) {
374         LOG.warn(
375             "Incorrect umask attempted to be created: "
376                 + conf.get(permssionConfKey)
377                 + ", using default file permissions.", e);
378         return FsPermission.getFileDefault();
379       }
380     }
381     return FsPermission.getFileDefault();
382   }
383 
384   /**
385    * Checks to see if the specified file system is available
386    *
387    * @param fs filesystem
388    * @throws IOException e
389    */
390   public static void checkFileSystemAvailable(final FileSystem fs)
391   throws IOException {
392     if (!(fs instanceof DistributedFileSystem)) {
393       return;
394     }
395     IOException exception = null;
396     DistributedFileSystem dfs = (DistributedFileSystem) fs;
397     try {
398       if (dfs.exists(new Path("/"))) {
399         return;
400       }
401     } catch (IOException e) {
402       exception = e instanceof RemoteException ?
403               ((RemoteException)e).unwrapRemoteException() : e;
404     }
405     try {
406       fs.close();
407     } catch (Exception e) {
408       LOG.error("file system close failed: ", e);
409     }
410     IOException io = new IOException("File system is not available");
411     io.initCause(exception);
412     throw io;
413   }
414 
415   /**
416    * We use reflection because {@link DistributedFileSystem#setSafeMode(
417    * FSConstants.SafeModeAction action, boolean isChecked)} is not in hadoop 1.1
418    *
419    * @param dfs
420    * @return whether we're in safe mode
421    * @throws IOException
422    */
423   private static boolean isInSafeMode(DistributedFileSystem dfs) throws IOException {
424     boolean inSafeMode = false;
425     try {
426       Method m = DistributedFileSystem.class.getMethod("setSafeMode", new Class<?> []{
427           org.apache.hadoop.hdfs.protocol.FSConstants.SafeModeAction.class, boolean.class});
428       inSafeMode = (Boolean) m.invoke(dfs,
429         org.apache.hadoop.hdfs.protocol.FSConstants.SafeModeAction.SAFEMODE_GET, true);
430     } catch (Exception e) {
431       if (e instanceof IOException) throw (IOException) e;
432 
433       // Check whether dfs is in safemode.
434       inSafeMode = dfs.setSafeMode(
435         org.apache.hadoop.hdfs.protocol.FSConstants.SafeModeAction.SAFEMODE_GET);
436     }
437     return inSafeMode;
438   }
439 
440   /**
441    * Check whether dfs is in safemode.
442    * @param conf
443    * @throws IOException
444    */
445   public static void checkDfsSafeMode(final Configuration conf)
446   throws IOException {
447     boolean isInSafeMode = false;
448     FileSystem fs = FileSystem.get(conf);
449     if (fs instanceof DistributedFileSystem) {
450       DistributedFileSystem dfs = (DistributedFileSystem)fs;
451       isInSafeMode = isInSafeMode(dfs);
452     }
453     if (isInSafeMode) {
454       throw new IOException("File system is in safemode, it can't be written now");
455     }
456   }
457 
458   /**
459    * Verifies current version of file system
460    *
461    * @param fs filesystem object
462    * @param rootdir root hbase directory
463    * @return null if no version file exists, version string otherwise.
464    * @throws IOException e
465    * @throws org.apache.hadoop.hbase.exceptions.DeserializationException
466    */
467   public static String getVersion(FileSystem fs, Path rootdir)
468   throws IOException, DeserializationException {
469     Path versionFile = new Path(rootdir, HConstants.VERSION_FILE_NAME);
470     FileStatus[] status = null;
471     try {
472       // hadoop 2.0 throws FNFE if directory does not exist.
473       // hadoop 1.0 returns null if directory does not exist.
474       status = fs.listStatus(versionFile);
475     } catch (FileNotFoundException fnfe) {
476       return null;
477     }
478     if (status == null || status.length == 0) return null;
479     String version = null;
480     byte [] content = new byte [(int)status[0].getLen()];
481     FSDataInputStream s = fs.open(versionFile);
482     try {
483       IOUtils.readFully(s, content, 0, content.length);
484       if (ProtobufUtil.isPBMagicPrefix(content)) {
485         version = parseVersionFrom(content);
486       } else {
487         // Presume it is in pre-pb format.
488         InputStream is = new ByteArrayInputStream(content);
489         DataInputStream dis = new DataInputStream(is);
490         try {
491           version = dis.readUTF();
492         } finally {
493           dis.close();
494         }
495       }
496     } catch (EOFException eof) {
497       LOG.warn("Version file was empty, odd, will try to set it.");
498     } finally {
499       s.close();
500     }
501     return version;
502   }
503 
504   /**
505    * Parse the content of the ${HBASE_ROOTDIR}/hbase.version file.
506    * @param bytes The byte content of the hbase.version file.
507    * @return The version found in the file as a String.
508    * @throws DeserializationException
509    */
510   static String parseVersionFrom(final byte [] bytes)
511   throws DeserializationException {
512     ProtobufUtil.expectPBMagicPrefix(bytes);
513     int pblen = ProtobufUtil.lengthOfPBMagic();
514     FSProtos.HBaseVersionFileContent.Builder builder =
515       FSProtos.HBaseVersionFileContent.newBuilder();
516     FSProtos.HBaseVersionFileContent fileContent;
517     try {
518       fileContent = builder.mergeFrom(bytes, pblen, bytes.length - pblen).build();
519       return fileContent.getVersion();
520     } catch (InvalidProtocolBufferException e) {
521       // Convert
522       throw new DeserializationException(e);
523     }
524   }
525 
526   /**
527    * Create the content to write into the ${HBASE_ROOTDIR}/hbase.version file.
528    * @param version Version to persist
529    * @return Serialized protobuf with <code>version</code> content and a bit of pb magic for a prefix.
530    */
531   static byte [] toVersionByteArray(final String version) {
532     FSProtos.HBaseVersionFileContent.Builder builder =
533       FSProtos.HBaseVersionFileContent.newBuilder();
534     return ProtobufUtil.prependPBMagic(builder.setVersion(version).build().toByteArray());
535   }
536 
537   /**
538    * Verifies current version of file system
539    *
540    * @param fs file system
541    * @param rootdir root directory of HBase installation
542    * @param message if true, issues a message on System.out
543    *
544    * @throws IOException e
545    * @throws DeserializationException
546    */
547   public static void checkVersion(FileSystem fs, Path rootdir, boolean message)
548   throws IOException, DeserializationException {
549     checkVersion(fs, rootdir, message, 0, HConstants.DEFAULT_VERSION_FILE_WRITE_ATTEMPTS);
550   }
551 
552   /**
553    * Verifies current version of file system
554    *
555    * @param fs file system
556    * @param rootdir root directory of HBase installation
557    * @param message if true, issues a message on System.out
558    * @param wait wait interval
559    * @param retries number of times to retry
560    *
561    * @throws IOException e
562    * @throws DeserializationException
563    */
564   public static void checkVersion(FileSystem fs, Path rootdir,
565       boolean message, int wait, int retries)
566   throws IOException, DeserializationException {
567     String version = getVersion(fs, rootdir);
568     if (version == null) {
569       if (!metaRegionExists(fs, rootdir)) {
570         // rootDir is empty (no version file and no root region)
571         // just create new version file (HBASE-1195)
572         setVersion(fs, rootdir, wait, retries);
573         return;
574       }
575     } else if (version.compareTo(HConstants.FILE_SYSTEM_VERSION) == 0) return;
576 
577     // Version is deprecated; migration is required.
578     // Output on stdout so user sees it in terminal.
579     String msg = "HBase file layout needs to be upgraded."
580       + " You have version " + version
581       + " and I want version " + HConstants.FILE_SYSTEM_VERSION
582       + ". Consult http://hbase.apache.org/book.html for further information about upgrading HBase."
583       + " Is your hbase.rootdir valid? If so, you may need to run "
584       + "'hbase hbck -fixVersionFile'.";
585     if (message) {
586       System.out.println("WARNING! " + msg);
587     }
588     throw new FileSystemVersionException(msg);
589   }
590 
591   /**
592    * Sets version of file system
593    *
594    * @param fs filesystem object
595    * @param rootdir hbase root
596    * @throws IOException e
597    */
598   public static void setVersion(FileSystem fs, Path rootdir)
599   throws IOException {
600     setVersion(fs, rootdir, HConstants.FILE_SYSTEM_VERSION, 0,
601       HConstants.DEFAULT_VERSION_FILE_WRITE_ATTEMPTS);
602   }
603 
604   /**
605    * Sets version of file system
606    *
607    * @param fs filesystem object
608    * @param rootdir hbase root
609    * @param wait time to wait for retry
610    * @param retries number of times to retry before failing
611    * @throws IOException e
612    */
613   public static void setVersion(FileSystem fs, Path rootdir, int wait, int retries)
614   throws IOException {
615     setVersion(fs, rootdir, HConstants.FILE_SYSTEM_VERSION, wait, retries);
616   }
617 
618 
619   /**
620    * Sets version of file system
621    *
622    * @param fs filesystem object
623    * @param rootdir hbase root directory
624    * @param version version to set
625    * @param wait time to wait for retry
626    * @param retries number of times to retry before throwing an IOException
627    * @throws IOException e
628    */
629   public static void setVersion(FileSystem fs, Path rootdir, String version,
630       int wait, int retries) throws IOException {
631     Path versionFile = new Path(rootdir, HConstants.VERSION_FILE_NAME);
632     Path tempVersionFile = new Path(rootdir, HConstants.HBASE_TEMP_DIRECTORY + Path.SEPARATOR +
633       HConstants.VERSION_FILE_NAME);
634     while (true) {
635       try {
636         // Write the version to a temporary file
637         FSDataOutputStream s = fs.create(tempVersionFile);
638         try {
639           s.write(toVersionByteArray(version));
640           s.close();
641           s = null;
642           // Move the temp version file to its normal location. Returns false
643           // if the rename failed. Throw an IOE in that case.
644           if (!fs.rename(tempVersionFile, versionFile)) {
645             throw new IOException("Unable to move temp version file to " + versionFile);
646           }
647         } finally {
648           // Cleaning up the temporary file if the rename failed would be trying
649           // too hard. We'll unconditionally create it again the next time
650           // through anyway; files are overwritten by default by create().
651 
652           // Attempt to close the stream on the way out if it is still open.
653           try {
654             if (s != null) s.close();
655           } catch (IOException ignore) { }
656         }
657         LOG.debug("Created version file at " + rootdir.toString() + " with version=" + version);
658         return;
659       } catch (IOException e) {
660         if (retries > 0) {
661           LOG.warn("Unable to create version file at " + rootdir.toString() + ", retrying", e);
662           fs.delete(versionFile, false);
663           try {
664             if (wait > 0) {
665               Thread.sleep(wait);
666             }
667           } catch (InterruptedException ie) {
668             throw (InterruptedIOException)new InterruptedIOException().initCause(ie);
669           }
670           retries--;
671         } else {
672           throw e;
673         }
674       }
675     }
676   }
677 
678   /**
679    * Checks that a cluster ID file exists in the HBase root directory
680    * @param fs the root directory FileSystem
681    * @param rootdir the HBase root directory in HDFS
682    * @param wait how long to wait between retries
683    * @return <code>true</code> if the file exists, otherwise <code>false</code>
684    * @throws IOException if checking the FileSystem fails
685    */
686   public static boolean checkClusterIdExists(FileSystem fs, Path rootdir,
687       int wait) throws IOException {
688     while (true) {
689       try {
690         Path filePath = new Path(rootdir, HConstants.CLUSTER_ID_FILE_NAME);
691         return fs.exists(filePath);
692       } catch (IOException ioe) {
693         if (wait > 0) {
694           LOG.warn("Unable to check cluster ID file in " + rootdir.toString() +
695               ", retrying in "+wait+"msec: "+StringUtils.stringifyException(ioe));
696           try {
697             Thread.sleep(wait);
698           } catch (InterruptedException e) {
699             throw (InterruptedIOException)new InterruptedIOException().initCause(e);
700           }
701         } else {
702           throw ioe;
703         }
704       }
705     }
706   }
707 
708   /**
709    * Returns the value of the unique cluster ID stored for this HBase instance.
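       * <p>Illustrative sketch (not part of the original source):
       * {@code ClusterId id = FSUtils.getClusterId(fs, FSUtils.getRootDir(conf));}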
710    * @param fs the root directory FileSystem
711    * @param rootdir the path to the HBase root directory
712    * @return the unique cluster identifier
713    * @throws IOException if reading the cluster ID file fails
714    */
715   public static ClusterId getClusterId(FileSystem fs, Path rootdir)
716   throws IOException {
717     Path idPath = new Path(rootdir, HConstants.CLUSTER_ID_FILE_NAME);
718     ClusterId clusterId = null;
719     FileStatus status = fs.exists(idPath)? fs.getFileStatus(idPath):  null;
720     if (status != null) {
721       int len = Ints.checkedCast(status.getLen());
722       byte [] content = new byte[len];
723       FSDataInputStream in = fs.open(idPath);
724       try {
725         in.readFully(content);
726       } catch (EOFException eof) {
727         LOG.warn("Cluster ID file " + idPath.toString() + " was empty");
728       } finally{
729         in.close();
730       }
731       try {
732         clusterId = ClusterId.parseFrom(content);
733       } catch (DeserializationException e) {
734         throw new IOException("content=" + Bytes.toString(content), e);
735       }
736       // If not pb'd, make it so.
737       if (!ProtobufUtil.isPBMagicPrefix(content)) {
738         String cid = null;
739         in = fs.open(idPath);
740         try {
741           cid = in.readUTF();
742           clusterId = new ClusterId(cid);
743         } catch (EOFException eof) {
744           LOG.warn("Cluster ID file " + idPath.toString() + " was empty");
745         } finally {
746           in.close();
747         }
748         rewriteAsPb(fs, rootdir, idPath, clusterId);
749       }
750       return clusterId;
751     } else {
752       LOG.warn("Cluster ID file does not exist at " + idPath.toString());
753     }
754     return clusterId;
755   }
756 
757   /**
758    * @param cid the cluster ID to rewrite as a protobuf-serialized hbase.id file
759    * @throws IOException
760    */
761   private static void rewriteAsPb(final FileSystem fs, final Path rootdir, final Path p,
762       final ClusterId cid)
763   throws IOException {
764     // Rewrite the file as pb.  Move aside the old one first, write new
765     // then delete the moved-aside file.
766     Path movedAsideName = new Path(p + "." + System.currentTimeMillis());
767     if (!fs.rename(p, movedAsideName)) throw new IOException("Failed rename of " + p);
768     setClusterId(fs, rootdir, cid, 100);
769     if (!fs.delete(movedAsideName, false)) {
770       throw new IOException("Failed delete of " + movedAsideName);
771     }
772     LOG.debug("Rewrote the hbase.id file as pb");
773   }
774 
775   /**
776    * Writes a new unique identifier for this cluster to the "hbase.id" file
777    * in the HBase root directory
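       * <p>Illustrative sketch ({@code clusterId} is a placeholder and the 1000 ms wait is arbitrary):
       * {@code FSUtils.setClusterId(fs, rootdir, clusterId, 1000);}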
778    * @param fs the root directory FileSystem
779    * @param rootdir the path to the HBase root directory
780    * @param clusterId the unique identifier to store
781    * @param wait how long (in milliseconds) to wait between retries
782    * @throws IOException if writing to the FileSystem fails and no retry wait value was given
783    */
784   public static void setClusterId(FileSystem fs, Path rootdir, ClusterId clusterId,
785       int wait) throws IOException {
786     while (true) {
787       try {
788         Path idFile = new Path(rootdir, HConstants.CLUSTER_ID_FILE_NAME);
789         Path tempIdFile = new Path(rootdir, HConstants.HBASE_TEMP_DIRECTORY +
790           Path.SEPARATOR + HConstants.CLUSTER_ID_FILE_NAME);
791         // Write the id file to a temporary location
792         FSDataOutputStream s = fs.create(tempIdFile);
793         try {
794           s.write(clusterId.toByteArray());
795           s.close();
796           s = null;
797           // Move the temporary file to its normal location. Throw an IOE if
798           // the rename failed
799           if (!fs.rename(tempIdFile, idFile)) {
800             throw new IOException("Unable to move temp cluster ID file to " + idFile);
801           }
802         } finally {
803           // Attempt to close the stream if still open on the way out
804           try {
805             if (s != null) s.close();
806           } catch (IOException ignore) { }
807         }
808         if (LOG.isDebugEnabled()) {
809           LOG.debug("Created cluster ID file at " + idFile.toString() + " with ID: " + clusterId);
810         }
811         return;
812       } catch (IOException ioe) {
813         if (wait > 0) {
814           LOG.warn("Unable to create cluster ID file in " + rootdir.toString() +
815               ", retrying in " + wait + "msec: " + StringUtils.stringifyException(ioe));
816           try {
817             Thread.sleep(wait);
818           } catch (InterruptedException e) {
819             throw (InterruptedIOException)new InterruptedIOException().initCause(e);
820           }
821         } else {
822           throw ioe;
823         }
824       }
825     }
826   }
827 
828   /**
829    * Verifies root directory path is a valid URI with a scheme
830    *
831    * @param root root directory path
832    * @return Passed <code>root</code> argument.
833    * @throws IOException if not a valid URI with a scheme
834    */
835   public static Path validateRootPath(Path root) throws IOException {
836     try {
837       URI rootURI = new URI(root.toString());
838       String scheme = rootURI.getScheme();
839       if (scheme == null) {
840         throw new IOException("Root directory does not have a scheme");
841       }
842       return root;
843     } catch (URISyntaxException e) {
844       IOException io = new IOException("Root directory path is not a valid " +
845         "URI -- check your " + HConstants.HBASE_DIR + " configuration");
846       io.initCause(e);
847       throw io;
848     }
849   }
850 
851   /**
852    * Checks for the presence of the root path (using the provided conf object) in the given path. If
853    * it exists, this method removes it and returns the String representation of the remaining relative path.
854    * @param path
855    * @param conf
856    * @return String representation of the remaining relative path
857    * @throws IOException
858    */
859   public static String removeRootPath(Path path, final Configuration conf) throws IOException {
860     Path root = FSUtils.getRootDir(conf);
861     String pathStr = path.toString();
862     // If the path does not start with the root path, return it as-is.
863     if (!pathStr.startsWith(root.toString())) return pathStr;
864     // Otherwise strip the root path prefix (plus the trailing "/" separator).
865     return pathStr.substring(root.toString().length() + 1);
866   }
867 
868   /**
869    * If DFS, check whether it is in safe mode and, if so, wait until safe mode clears.
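       * <p>Illustrative sketch (the 1000 ms interval is arbitrary):
       * {@code FSUtils.waitOnSafeMode(conf, 1000);}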
870    * @param conf configuration
871    * @param wait Sleep between retries
872    * @throws IOException e
873    */
874   public static void waitOnSafeMode(final Configuration conf,
875     final long wait)
876   throws IOException {
877     FileSystem fs = FileSystem.get(conf);
878     if (!(fs instanceof DistributedFileSystem)) return;
879     DistributedFileSystem dfs = (DistributedFileSystem)fs;
880     // Make sure dfs is not in safe mode
881     while (isInSafeMode(dfs)) {
882       LOG.info("Waiting for dfs to exit safe mode...");
883       try {
884         Thread.sleep(wait);
885       } catch (InterruptedException e) {
886         throw (InterruptedIOException)new InterruptedIOException().initCause(e);
887       }
888     }
889   }
890 
891   /**
892    * Return the 'path' component of a Path.  In Hadoop, Path is a URI.  This
893    * method returns the 'path' component of a Path's URI: e.g. If a Path is
894    * <code>hdfs://example.org:9000/hbase_trunk/TestTable/compaction.dir</code>,
895    * this method returns <code>/hbase_trunk/TestTable/compaction.dir</code>.
896    * This method is useful if you want to print out a Path without qualifying the
897    * FileSystem instance.
898    * @param p Filesystem Path whose 'path' component we are to return.
899    * @return the 'path' portion of the Path's URI
900    */
901   public static String getPath(Path p) {
902     return p.toUri().getPath();
903   }
904 
905   /**
906    * @param c configuration
907    * @return Path to hbase root directory: i.e. <code>hbase.rootdir</code> from
908    * configuration as a qualified Path.
909    * @throws IOException e
910    */
911   public static Path getRootDir(final Configuration c) throws IOException {
912     Path p = new Path(c.get(HConstants.HBASE_DIR));
913     FileSystem fs = p.getFileSystem(c);
914     return p.makeQualified(fs);
915   }
916 
917   public static void setRootDir(final Configuration c, final Path root) throws IOException {
918     c.set(HConstants.HBASE_DIR, root.toString());
919   }
920 
921   public static void setFsDefault(final Configuration c, final Path root) throws IOException {
922     c.set("fs.defaultFS", root.toString());    // for hadoop 0.21+
923   }
924 
925   /**
926    * Checks if meta region exists
927    *
928    * @param fs file system
929    * @param rootdir root directory of HBase installation
930    * @return true if exists
931    * @throws IOException e
932    */
933   @SuppressWarnings("deprecation")
934   public static boolean metaRegionExists(FileSystem fs, Path rootdir)
935   throws IOException {
936     Path metaRegionDir =
937       HRegion.getRegionDir(rootdir, HRegionInfo.FIRST_META_REGIONINFO);
938     return fs.exists(metaRegionDir);
939   }
940 
941   /**
942    * Compute HDFS blocks distribution of a given file, or a portion of the file
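       * <p>Illustrative sketch ({@code hfilePath} is a placeholder name):
       * <pre>{@code
       * FileStatus st = fs.getFileStatus(hfilePath);
       * HDFSBlocksDistribution dist =
       *     FSUtils.computeHDFSBlocksDistribution(fs, st, 0, st.getLen());
       * }</pre>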
943    * @param fs file system
944    * @param status file status of the file
945    * @param start start position of the portion
946    * @param length length of the portion
947    * @return The HDFS blocks distribution
948    */
949   static public HDFSBlocksDistribution computeHDFSBlocksDistribution(
950     final FileSystem fs, FileStatus status, long start, long length)
951     throws IOException {
952     HDFSBlocksDistribution blocksDistribution = new HDFSBlocksDistribution();
953     BlockLocation [] blockLocations =
954       fs.getFileBlockLocations(status, start, length);
955     for(BlockLocation bl : blockLocations) {
956       String [] hosts = bl.getHosts();
957       long len = bl.getLength();
958       blocksDistribution.addHostsAndBlockWeight(hosts, len);
959     }
960 
961     return blocksDistribution;
962   }
963 
964 
965 
966   /**
967    * Runs through the hbase rootdir and checks that all stores have only
968    * one file in them -- that is, they've been major compacted.  Looks
969    * at root and meta tables too.
970    * @param fs filesystem
971    * @param hbaseRootDir hbase root directory
972    * @return True if this hbase install is major compacted.
973    * @throws IOException e
974    */
975   public static boolean isMajorCompacted(final FileSystem fs,
976       final Path hbaseRootDir)
977   throws IOException {
978     List<Path> tableDirs = getTableDirs(fs, hbaseRootDir);
979     for (Path d : tableDirs) {
980       FileStatus[] regionDirs = fs.listStatus(d, new DirFilter(fs));
981       for (FileStatus regionDir : regionDirs) {
982         Path dd = regionDir.getPath();
983         if (dd.getName().equals(HConstants.HREGION_COMPACTIONDIR_NAME)) {
984           continue;
985         }
986         // Else its a region name.  Now look in region for families.
987         FileStatus[] familyDirs = fs.listStatus(dd, new DirFilter(fs));
988         for (FileStatus familyDir : familyDirs) {
989           Path family = familyDir.getPath();
990           // Now in family make sure only one file.
991           FileStatus[] familyStatus = fs.listStatus(family);
992           if (familyStatus.length > 1) {
993             LOG.debug(family.toString() + " has " + familyStatus.length +
994                 " files.");
995             return false;
996           }
997         }
998       }
999     }
1000     return true;
1001   }
1002 
1003   // TODO move this method OUT of FSUtils. No dependencies on HMaster.
1004   /**
1005    * Returns the total overall fragmentation percentage. Includes hbase:meta and
1006    * -ROOT- as well.
1007    *
1008    * @param master  The master defining the HBase root and file system.
1009    * @return A map for each table and its percentage.
1010    * @throws IOException When scanning the directory fails.
1011    */
1012   public static int getTotalTableFragmentation(final HMaster master)
1013   throws IOException {
1014     Map<String, Integer> map = getTableFragmentation(master);
1015     return map != null && map.size() > 0 ? map.get("-TOTAL-") : -1;
1016   }
1017 
1018   /**
1019    * Runs through the HBase rootdir and checks how many stores for each table
1020    * have more than one file in them. Checks -ROOT- and hbase:meta too. The total
1021    * percentage across all tables is stored under the special key "-TOTAL-".
1022    *
1023    * @param master  The master defining the HBase root and file system.
1024    * @return A map for each table and its percentage.
1025    *
1026    * @throws IOException When scanning the directory fails.
1027    */
1028   public static Map<String, Integer> getTableFragmentation(
1029     final HMaster master)
1030   throws IOException {
1031     Path path = getRootDir(master.getConfiguration());
1032     // since HMaster.getFileSystem() is package private
1033     FileSystem fs = path.getFileSystem(master.getConfiguration());
1034     return getTableFragmentation(fs, path);
1035   }
1036 
1037   /**
1038    * Runs through the HBase rootdir and checks how many stores for each table
1039    * have more than one file in them. Checks -ROOT- and hbase:meta too. The total
1040    * percentage across all tables is stored under the special key "-TOTAL-".
1041    *
1042    * @param fs  The file system to use.
1043    * @param hbaseRootDir  The root directory to scan.
1044    * @return A map for each table and its percentage.
1045    * @throws IOException When scanning the directory fails.
1046    */
1047   public static Map<String, Integer> getTableFragmentation(
1048     final FileSystem fs, final Path hbaseRootDir)
1049   throws IOException {
1050     Map<String, Integer> frags = new HashMap<String, Integer>();
1051     int cfCountTotal = 0;
1052     int cfFragTotal = 0;
1053     DirFilter df = new DirFilter(fs);
1054     List<Path> tableDirs = getTableDirs(fs, hbaseRootDir);
1055     for (Path d : tableDirs) {
1056       int cfCount = 0;
1057       int cfFrag = 0;
1058       FileStatus[] regionDirs = fs.listStatus(d, df);
1059       for (FileStatus regionDir : regionDirs) {
1060         Path dd = regionDir.getPath();
1061         if (dd.getName().equals(HConstants.HREGION_COMPACTIONDIR_NAME)) {
1062           continue;
1063         }
1064         // else its a region name, now look in region for families
1065         FileStatus[] familyDirs = fs.listStatus(dd, df);
1066         for (FileStatus familyDir : familyDirs) {
1067           cfCount++;
1068           cfCountTotal++;
1069           Path family = familyDir.getPath();
1070           // now in family make sure only one file
1071           FileStatus[] familyStatus = fs.listStatus(family);
1072           if (familyStatus.length > 1) {
1073             cfFrag++;
1074             cfFragTotal++;
1075           }
1076         }
1077       }
1078       // compute percentage per table and store in result list
1079       frags.put(FSUtils.getTableName(d).getNameAsString(),
1080           Math.round((float) cfFrag / cfCount * 100));
1081     }
1082     // set overall percentage for all tables
1083     frags.put("-TOTAL-", Math.round((float) cfFragTotal / cfCountTotal * 100));
1084     return frags;
1085   }
1086 
1087   /**
1088    * Expects to find -ROOT- directory.
1089    * @param fs filesystem
1090    * @param hbaseRootDir hbase root directory
1091    * @return True if this is a pre-0.20 layout.
1092    * @throws IOException e
1093    */
1094   public static boolean isPre020FileLayout(final FileSystem fs,
1095     final Path hbaseRootDir)
1096   throws IOException {
1097     Path mapfiles = new Path(new Path(new Path(new Path(hbaseRootDir, "-ROOT-"),
1098       "70236052"), "info"), "mapfiles");
1099     return fs.exists(mapfiles);
1100   }
1101 
1102   /**
1103    * Runs through the hbase rootdir and checks that all stores have only
1104    * one file in them -- that is, they've been major compacted.  Looks
1105    * at root and meta tables too.  This version differs from
1106    * {@link #isMajorCompacted(FileSystem, Path)} in that it expects a
1107    * pre-0.20.0 hbase layout on the filesystem.  Used when migrating.
1108    * @param fs filesystem
1109    * @param hbaseRootDir hbase root directory
1110    * @return True if this hbase install is major compacted.
1111    * @throws IOException e
1112    */
1113   public static boolean isMajorCompactedPre020(final FileSystem fs,
1114       final Path hbaseRootDir)
1115   throws IOException {
1116     // Presumes any directory under hbase.rootdir is a table.
1117     List<Path> tableDirs = getTableDirs(fs, hbaseRootDir);
1118     for (Path d: tableDirs) {
1119       // Inside a table, there are compaction.dir directories to skip.
1120       // Otherwise, all else should be regions.  Then in each region, should
1121       // only be family directories.  Under each of these, should be a mapfile
1122       // and info directory and in these only one file.
1123       if (d.getName().equals(HConstants.HREGION_LOGDIR_NAME)) {
1124         continue;
1125       }
1126       FileStatus[] regionDirs = fs.listStatus(d, new DirFilter(fs));
1127       for (FileStatus regionDir : regionDirs) {
1128         Path dd = regionDir.getPath();
1129         if (dd.getName().equals(HConstants.HREGION_COMPACTIONDIR_NAME)) {
1130           continue;
1131         }
1132         // Else its a region name.  Now look in region for families.
1133         FileStatus[] familyDirs = fs.listStatus(dd, new DirFilter(fs));
1134         for (FileStatus familyDir : familyDirs) {
1135           Path family = familyDir.getPath();
1136           FileStatus[] infoAndMapfile = fs.listStatus(family);
1137           // Assert that only info and mapfile in family dir.
1138           if (infoAndMapfile.length != 0 && infoAndMapfile.length != 2) {
1139             LOG.debug(family.toString() +
1140                 " has more than just info and mapfile: " + infoAndMapfile.length);
1141             return false;
1142           }
1143           // Make sure directory named info or mapfile.
1144           for (int ll = 0; ll < 2; ll++) {
1145             if (infoAndMapfile[ll].getPath().getName().equals("info") ||
1146                 infoAndMapfile[ll].getPath().getName().equals("mapfiles"))
1147               continue;
1148             LOG.debug("Unexpected directory name: " +
1149                 infoAndMapfile[ll].getPath());
1150             return false;
1151           }
1152           // Now in family, there are 'mapfile' and 'info' subdirs.  Just
1153           // look in the 'mapfile' subdir.
1154           FileStatus[] familyStatus =
1155               fs.listStatus(new Path(family, "mapfiles"));
1156           if (familyStatus.length > 1) {
1157             LOG.debug(family.toString() + " has " + familyStatus.length +
1158                 " files.");
1159             return false;
1160           }
1161         }
1162       }
1163     }
1164     return true;
1165   }
1166 
1167   /**
1168    * Returns the {@link org.apache.hadoop.fs.Path} object representing the table directory under
1169    * path rootdir
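        * <p>For example (illustrative), {@code getTableDir(rootdir, TableName.valueOf("ns", "tbl"))}
        * resolves to {@code rootdir/data/ns/tbl} under the default namespace layout.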
1170    *
1171    * @param rootdir qualified path of HBase root directory
1172    * @param tableName name of table
1173    * @return {@link org.apache.hadoop.fs.Path} for table
1174    */
1175   public static Path getTableDir(Path rootdir, final TableName tableName) {
1176     return new Path(getNamespaceDir(rootdir, tableName.getNamespaceAsString()),
1177         tableName.getQualifierAsString());
1178   }
1179 
1180   /**
1181    * Returns the {@link org.apache.hadoop.hbase.TableName} object representing
1182    * the table that the given table directory path belongs to.
1183    *
1184    * @param tablePath path of table
1185    * @return {@link org.apache.hadoop.hbase.TableName} for the table
1187    */
1188   public static TableName getTableName(Path tablePath) {
1189     return TableName.valueOf(tablePath.getParent().getName(), tablePath.getName());
1190   }
1191 
1192   /**
1193    * Returns the {@link org.apache.hadoop.fs.Path} object representing
1194    * the namespace directory under path rootdir
1195    *
1196    * @param rootdir qualified path of HBase root directory
1197    * @param namespace namespace name
1198    * @return {@link org.apache.hadoop.fs.Path} for the namespace directory
1199    */
1200   public static Path getNamespaceDir(Path rootdir, final String namespace) {
1201     return new Path(rootdir, new Path(HConstants.BASE_NAMESPACE_DIR,
1202         new Path(namespace)));
1203   }
1204 
1205   /**
1206    * A {@link PathFilter} that returns only regular files.
1207    */
1208   static class FileFilter implements PathFilter {
1209     private final FileSystem fs;
1210 
1211     public FileFilter(final FileSystem fs) {
1212       this.fs = fs;
1213     }
1214 
1215     @Override
1216     public boolean accept(Path p) {
1217       try {
1218         return fs.isFile(p);
1219       } catch (IOException e) {
1220         LOG.debug("unable to verify if path=" + p + " is a regular file", e);
1221         return false;
1222       }
1223     }
1224   }
1225 
1226   /**
1227    * Directory filter that doesn't include any of the directories in the specified blacklist
1228    */
1229   public static class BlackListDirFilter implements PathFilter {
1230     private final FileSystem fs;
1231     private List<String> blacklist;
1232 
1233     /**
1234      * Create a filter on the given filesystem with the specified blacklist
1235      * @param fs filesystem to filter
1236      * @param directoryNameBlackList list of the names of the directories to filter. If
1237      *          <tt>null</tt>, all directories are returned
1238      */
1239     @SuppressWarnings("unchecked")
1240     public BlackListDirFilter(final FileSystem fs, final List<String> directoryNameBlackList) {
1241       this.fs = fs;
1242       blacklist =
1243         (List<String>) (directoryNameBlackList == null ? Collections.emptyList()
1244           : directoryNameBlackList);
1245     }
1246 
1247     @Override
1248     public boolean accept(Path p) {
1249       boolean isValid = false;
1250       try {
1251         if (blacklist.contains(p.getName().toString())) {
1252           isValid = false;
1253         } else {
1254           isValid = fs.getFileStatus(p).isDirectory();
1255         }
1256       } catch (IOException e) {
1257         LOG.warn("An error occurred while verifying if [" + p.toString()
1258             + "] is a valid directory. Returning 'not valid' and continuing.", e);
1259       }
1260       return isValid;
1261     }
1262   }
1263 
1264   /**
1265    * A {@link PathFilter} that only allows directories.
1266    */
1267   public static class DirFilter extends BlackListDirFilter {
1268 
1269     public DirFilter(FileSystem fs) {
1270       super(fs, null);
1271     }
1272   }
1273 
1274   /**
1275    * A {@link PathFilter} that returns usertable directories. To get all directories use the
1276    * {@link BlackListDirFilter} with a <tt>null</tt> blacklist
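        * <p>Illustrative sketch ({@code namespaceDir} is a placeholder name):
        * {@code fs.listStatus(namespaceDir, new UserTableDirFilter(fs))} returns only the table
        * directories under the given namespace directory.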
1277    */
1278   public static class UserTableDirFilter extends BlackListDirFilter {
1279 
1280     public UserTableDirFilter(FileSystem fs) {
1281       super(fs, HConstants.HBASE_NON_TABLE_DIRS);
1282     }
1283   }
1284 
1285   /**
1286    * Heuristic to determine whether it is safe to open a file for append.
1287    * Looks both for dfs.support.append and uses reflection to search
1288    * for SequenceFile.Writer.syncFs() or FSDataOutputStream.hflush().
1289    * @param conf
1290    * @return True if append is supported
1291    */
1292   public static boolean isAppendSupported(final Configuration conf) {
1293     boolean append = conf.getBoolean("dfs.support.append", false);
1294     if (append) {
1295       try {
1296         // TODO: The implementation that comes back when we do a createWriter
1297         // may not be using SequenceFile so the below is not a definitive test.
1298         // Will do for now (hdfs-200).
1299         SequenceFile.Writer.class.getMethod("syncFs", new Class<?> []{});
1300         append = true;
1301       } catch (SecurityException e) {
1302       } catch (NoSuchMethodException e) {
1303         append = false;
1304       }
1305     }
1306     if (!append) {
1307       // Look for the 0.21, 0.22, new-style append evidence.
1308       try {
1309         FSDataOutputStream.class.getMethod("hflush", new Class<?> []{});
1310         append = true;
1311       } catch (NoSuchMethodException e) {
1312         append = false;
1313       }
1314     }
1315     return append;
1316   }
1317 
1318   /**
1319    * @param conf
1320    * @return True if the scheme of this filesystem is 'hdfs'.
1321    * @throws IOException
1322    */
1323   public static boolean isHDFS(final Configuration conf) throws IOException {
1324     FileSystem fs = FileSystem.get(conf);
1325     String scheme = fs.getUri().getScheme();
1326     return scheme.equalsIgnoreCase("hdfs");
1327   }
1328 
1329   /**
1330    * Recover file lease. Used when a file is suspected
1331    * to have been left open by another process.
1332    * @param fs FileSystem handle
1333    * @param p Path of file to recover lease
1334    * @param conf Configuration handle
1335    * @throws IOException
1336    */
1337   public abstract void recoverFileLease(final FileSystem fs, final Path p,
1338       Configuration conf, CancelableProgressable reporter) throws IOException;
1339 
1340   public static List<Path> getTableDirs(final FileSystem fs, final Path rootdir)
1341       throws IOException {
1342     List<Path> tableDirs = new LinkedList<Path>();
1343 
1344     for(FileStatus status :
1345         fs.globStatus(new Path(rootdir,
1346             new Path(HConstants.BASE_NAMESPACE_DIR, "*")))) {
1347       tableDirs.addAll(FSUtils.getLocalTableDirs(fs, status.getPath()));
1348     }
1349     return tableDirs;
1350   }
1351 
1352   /**
1353    * @param fs
1354    * @param rootdir
1355    * @return All the table directories under <code>rootdir</code>. Ignores non-table HBase folders
1356    * such as .logs, .oldlogs and .corrupt.
1357    * @throws IOException
1358    */
1359   public static List<Path> getLocalTableDirs(final FileSystem fs, final Path rootdir)
1360       throws IOException {
1361     // presumes any directory under hbase.rootdir is a table
1362     FileStatus[] dirs = fs.listStatus(rootdir, new UserTableDirFilter(fs));
1363     List<Path> tabledirs = new ArrayList<Path>(dirs.length);
1364     for (FileStatus dir: dirs) {
1365       tabledirs.add(dir.getPath());
1366     }
1367     return tabledirs;
1368   }
1369 
1370   /**
1371    * Checks if the given path is the one with 'recovered.edits' dir.
1372    * @param path
1373    * @return True if the path contains the 'recovered.edits' directory name
1374    */
1375   public static boolean isRecoveredEdits(Path path) {
1376     return path.toString().contains(HConstants.RECOVERED_EDITS_DIR);
1377   }
1378 
1379   /**
   * Filter that accepts only region directories (hex or older numeric names).
1381    */
1382   public static class RegionDirFilter implements PathFilter {
1383     // This pattern will accept 0.90+ style hex region dirs and older numeric region dir names.
1384     final public static Pattern regionDirPattern = Pattern.compile("^[0-9a-f]*$");
1385     final FileSystem fs;
1386 
1387     public RegionDirFilter(FileSystem fs) {
1388       this.fs = fs;
1389     }
1390 
1391     @Override
1392     public boolean accept(Path rd) {
1393       if (!regionDirPattern.matcher(rd.getName()).matches()) {
1394         return false;
1395       }
1396 
1397       try {
1398         return fs.getFileStatus(rd).isDirectory();
1399       } catch (IOException ioe) {
1400         // Maybe the file was moved or the fs was disconnected.
1401         LOG.warn("Skipping file " + rd +" due to IOException", ioe);
1402         return false;
1403       }
1404     }
1405   }
1406 
1407   /**
1408    * Given a particular table dir, return all the regiondirs inside it, excluding files such as
1409    * .tableinfo
1410    * @param fs A file system for the Path
1411    * @param tableDir Path to a specific table directory <hbase.rootdir>/<tabledir>
1412    * @return List of paths to valid region directories in table dir.
1413    * @throws IOException
1414    */
1415   public static List<Path> getRegionDirs(final FileSystem fs, final Path tableDir) throws IOException {
1416     // assumes we are in a table dir.
1417     FileStatus[] rds = fs.listStatus(tableDir, new RegionDirFilter(fs));
1418     List<Path> regionDirs = new ArrayList<Path>(rds.length);
1419     for (FileStatus rdfs: rds) {
1420       Path rdPath = rdfs.getPath();
1421       regionDirs.add(rdPath);
1422     }
1423     return regionDirs;
1424   }
1425 
1426   /**
1427    * Filter for all dirs that are legal column family names.  This is generally used for colfam
1428    * dirs <hbase.rootdir>/<tabledir>/<regiondir>/<colfamdir>.
1429    */
1430   public static class FamilyDirFilter implements PathFilter {
1431     final FileSystem fs;
1432 
1433     public FamilyDirFilter(FileSystem fs) {
1434       this.fs = fs;
1435     }
1436 
1437     @Override
1438     public boolean accept(Path rd) {
1439       try {
1440         // throws IAE if invalid
1441         HColumnDescriptor.isLegalFamilyName(Bytes.toBytes(rd.getName()));
1442       } catch (IllegalArgumentException iae) {
1443         // path name is an invalid family name and thus is excluded.
1444         return false;
1445       }
1446 
1447       try {
1448         return fs.getFileStatus(rd).isDirectory();
1449       } catch (IOException ioe) {
1450         // Maybe the file was moved or the fs was disconnected.
1451         LOG.warn("Skipping file " + rd +" due to IOException", ioe);
1452         return false;
1453       }
1454     }
1455   }
1456 
1457   /**
1458    * Given a particular region dir, return all the familydirs inside it
1459    *
1460    * @param fs A file system for the Path
1461    * @param regionDir Path to a specific region directory
1462    * @return List of paths to valid family directories in region dir.
1463    * @throws IOException
1464    */
1465   public static List<Path> getFamilyDirs(final FileSystem fs, final Path regionDir) throws IOException {
1466     // assumes we are in a region dir.
1467     FileStatus[] fds = fs.listStatus(regionDir, new FamilyDirFilter(fs));
1468     List<Path> familyDirs = new ArrayList<Path>(fds.length);
1469     for (FileStatus fdfs: fds) {
1470       Path fdPath = fdfs.getPath();
1471       familyDirs.add(fdPath);
1472     }
1473     return familyDirs;
1474   }
1475 
1476   /**
1477    * Filter for HFiles that excludes reference files.
1478    */
1479   public static class HFileFilter implements PathFilter {
    // This pattern will accept 0.90+ style hex hfile names but reject reference files
1481     final public static Pattern hfilePattern = Pattern.compile("^([0-9a-f]+)$");
1482 
1483     final FileSystem fs;
1484 
1485     public HFileFilter(FileSystem fs) {
1486       this.fs = fs;
1487     }
1488 
1489     @Override
1490     public boolean accept(Path rd) {
1491       if (!hfilePattern.matcher(rd.getName()).matches()) {
1492         return false;
1493       }
1494 
1495       try {
1496         // only files
1497         return !fs.getFileStatus(rd).isDirectory();
1498       } catch (IOException ioe) {
1499         // Maybe the file was moved or the fs was disconnected.
1500         LOG.warn("Skipping file " + rd +" due to IOException", ioe);
1501         return false;
1502       }
1503     }
1504   }
1505 
1506   /**
1507    * @param conf
   * @return The filesystem of the hbase rootdir.
1509    * @throws IOException
1510    */
1511   public static FileSystem getCurrentFileSystem(Configuration conf)
1512   throws IOException {
1513     return getRootDir(conf).getFileSystem(conf);
1514   }
1515 
1516 
1517   /**
1518    * Runs through the HBase rootdir/tablename and creates a reverse lookup map for
1519    * table StoreFile names to the full Path.
1520    * <br>
1521    * Example...<br>
1522    * Key = 3944417774205889744  <br>
1523    * Value = hdfs://localhost:51169/user/userid/-ROOT-/70236052/info/3944417774205889744
1524    *
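   * A minimal usage sketch (the table name below is illustrative and assumes
   * {@code fs} and {@code conf} are already initialized):
   * <pre>{@code
   * Map<String, Path> storeFiles = FSUtils.getTableStoreFilePathMap(
   *     null, fs, FSUtils.getRootDir(conf), TableName.valueOf("myTable"));
   * Path p = storeFiles.get("3944417774205889744"); // look up a store file by name
   * }</pre>
   *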
1525    * @param map map to add values.  If null, this method will create and populate one to return
1526    * @param fs  The file system to use.
1527    * @param hbaseRootDir  The root directory to scan.
1528    * @param tableName name of the table to scan.
1529    * @return Map keyed by StoreFile name with a value of the full Path.
1530    * @throws IOException When scanning the directory fails.
1531    */
1532   public static Map<String, Path> getTableStoreFilePathMap(Map<String, Path> map,
1533   final FileSystem fs, final Path hbaseRootDir, TableName tableName)
1534   throws IOException {
1535     if (map == null) {
1536       map = new HashMap<String, Path>();
1537     }
1538 
1539     // only include the directory paths to tables
1540     Path tableDir = FSUtils.getTableDir(hbaseRootDir, tableName);
1541     // Inside a table, there are compaction.dir directories to skip.  Otherwise, all else
1542     // should be regions.
1543     PathFilter df = new BlackListDirFilter(fs, HConstants.HBASE_NON_TABLE_DIRS);
1544     FileStatus[] regionDirs = fs.listStatus(tableDir);
1545     for (FileStatus regionDir : regionDirs) {
1546       Path dd = regionDir.getPath();
1547       if (dd.getName().equals(HConstants.HREGION_COMPACTIONDIR_NAME)) {
1548         continue;
1549       }
1550       // else its a region name, now look in region for families
1551       FileStatus[] familyDirs = fs.listStatus(dd, df);
1552       for (FileStatus familyDir : familyDirs) {
1553         Path family = familyDir.getPath();
1554         // now in family, iterate over the StoreFiles and
1555         // put in map
1556         FileStatus[] familyStatus = fs.listStatus(family);
1557         for (FileStatus sfStatus : familyStatus) {
1558           Path sf = sfStatus.getPath();
1559           map.put( sf.getName(), sf);
1560         }
1561       }
1562     }
1563     return map;
1564   }
1565 
1566 
1567   /**
1568    * Runs through the HBase rootdir and creates a reverse lookup map for
1569    * table StoreFile names to the full Path.
1570    * <br>
1571    * Example...<br>
1572    * Key = 3944417774205889744  <br>
1573    * Value = hdfs://localhost:51169/user/userid/-ROOT-/70236052/info/3944417774205889744
1574    *
1575    * @param fs  The file system to use.
1576    * @param hbaseRootDir  The root directory to scan.
1577    * @return Map keyed by StoreFile name with a value of the full Path.
1578    * @throws IOException When scanning the directory fails.
1579    */
1580   public static Map<String, Path> getTableStoreFilePathMap(
1581     final FileSystem fs, final Path hbaseRootDir)
1582   throws IOException {
1583     Map<String, Path> map = new HashMap<String, Path>();
1584 
1585     // if this method looks similar to 'getTableFragmentation' that is because
1586     // it was borrowed from it.
1587 
1588     // only include the directory paths to tables
1589     for (Path tableDir : FSUtils.getTableDirs(fs, hbaseRootDir)) {
1590       getTableStoreFilePathMap(map, fs, hbaseRootDir,
1591           FSUtils.getTableName(tableDir));
1592     }
1593     return map;
1594   }
1595 
1596   /**
   * Calls fs.listStatus() and treats FileNotFoundException as non-fatal.
   * This accommodates differences between Hadoop versions: Hadoop 1 does not
   * throw a FileNotFoundException but returns an empty FileStatus[], while
   * Hadoop 2 throws FileNotFoundException.
1601    *
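   * A minimal sketch of the null-return convention (assumes {@code fs} and
   * {@code dir} are already initialized):
   * <pre>{@code
   * FileStatus[] files = FSUtils.listStatus(fs, dir, null);
   * if (files == null) {
   *   // directory is empty or does not exist
   * }
   * }</pre>
   *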
1602    * @param fs file system
1603    * @param dir directory
1604    * @param filter path filter
1605    * @return null if dir is empty or doesn't exist, otherwise FileStatus array
1606    */
1607   public static FileStatus [] listStatus(final FileSystem fs,
1608       final Path dir, final PathFilter filter) throws IOException {
1609     FileStatus [] status = null;
1610     try {
1611       status = filter == null ? fs.listStatus(dir) : fs.listStatus(dir, filter);
1612     } catch (FileNotFoundException fnfe) {
1613       // if directory doesn't exist, return null
1614       if (LOG.isTraceEnabled()) {
1615         LOG.trace(dir + " doesn't exist");
1616       }
1617     }
1618     if (status == null || status.length < 1) return null;
1619     return status;
1620   }
1621 
1622   /**
   * Calls fs.listStatus() and treats FileNotFoundException as non-fatal.
   * This accommodates differences between Hadoop versions.
1625    *
1626    * @param fs file system
1627    * @param dir directory
1628    * @return null if dir is empty or doesn't exist, otherwise FileStatus array
1629    */
1630   public static FileStatus[] listStatus(final FileSystem fs, final Path dir) throws IOException {
1631     return listStatus(fs, dir, null);
1632   }
1633 
1634   /**
   * Calls fs.delete() and returns its result.
   *
   * @param fs
   * @param path
   * @param recursive
   * @return the value returned by fs.delete()
1641    * @throws IOException
1642    */
1643   public static boolean delete(final FileSystem fs, final Path path, final boolean recursive)
1644       throws IOException {
1645     return fs.delete(path, recursive);
1646   }
1647 
1648   /**
1649    * Calls fs.exists(). Checks if the specified path exists
1650    *
1651    * @param fs
1652    * @param path
1653    * @return the value returned by fs.exists()
1654    * @throws IOException
1655    */
1656   public static boolean isExists(final FileSystem fs, final Path path) throws IOException {
1657     return fs.exists(path);
1658   }
1659 
1660   /**
1661    * Throw an exception if an action is not permitted by a user on a file.
1662    *
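   * A minimal usage sketch (assumes {@code fs} and {@code path} are already initialized):
   * <pre>{@code
   * UserGroupInformation ugi = UserGroupInformation.getCurrentUser();
   * FSUtils.checkAccess(ugi, fs.getFileStatus(path), FsAction.READ);
   * }</pre>
   *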
1663    * @param ugi
1664    *          the user
1665    * @param file
1666    *          the file
1667    * @param action
1668    *          the action
1669    */
1670   public static void checkAccess(UserGroupInformation ugi, FileStatus file,
1671       FsAction action) throws AccessDeniedException {
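    // Check owner, then group, then other; only the first matching category is consulted.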
1672     if (ugi.getShortUserName().equals(file.getOwner())) {
1673       if (file.getPermission().getUserAction().implies(action)) {
1674         return;
1675       }
1676     } else if (contains(ugi.getGroupNames(), file.getGroup())) {
1677       if (file.getPermission().getGroupAction().implies(action)) {
1678         return;
1679       }
1680     } else if (file.getPermission().getOtherAction().implies(action)) {
1681       return;
1682     }
1683     throw new AccessDeniedException("Permission denied:" + " action=" + action
1684         + " path=" + file.getPath() + " user=" + ugi.getShortUserName());
1685   }
1686 
1687   private static boolean contains(String[] groups, String user) {
1688     for (String group : groups) {
1689       if (group.equals(user)) {
1690         return true;
1691       }
1692     }
1693     return false;
1694   }
1695 
1696   /**
1697    * Log the current state of the filesystem from a certain root directory
1698    * @param fs filesystem to investigate
1699    * @param root root file/directory to start logging from
1700    * @param LOG log to output information
1701    * @throws IOException if an unexpected exception occurs
1702    */
1703   public static void logFileSystemState(final FileSystem fs, final Path root, Log LOG)
1704       throws IOException {
1705     LOG.debug("Current file system:");
1706     logFSTree(LOG, fs, root, "|-");
1707   }
1708 
1709   /**
1710    * Recursive helper to log the state of the FS
1711    *
1712    * @see #logFileSystemState(FileSystem, Path, Log)
1713    */
1714   private static void logFSTree(Log LOG, final FileSystem fs, final Path root, String prefix)
1715       throws IOException {
1716     FileStatus[] files = FSUtils.listStatus(fs, root, null);
1717     if (files == null) return;
1718 
1719     for (FileStatus file : files) {
1720       if (file.isDirectory()) {
1721         LOG.debug(prefix + file.getPath().getName() + "/");
1722         logFSTree(LOG, fs, file.getPath(), prefix + "---");
1723       } else {
1724         LOG.debug(prefix + file.getPath().getName());
1725       }
1726     }
1727   }
1728 
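  /**
   * Sets the modification time of <code>src</code> to the current time and then renames it to
   * <code>dest</code>. The modification time is later used by the TimeToLive cleaner.
   * @return the value returned by fs.rename()
   * @throws IOException
   */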
1729   public static boolean renameAndSetModifyTime(final FileSystem fs, final Path src, final Path dest)
1730       throws IOException {
1731     // set the modify time for TimeToLive Cleaner
1732     fs.setTimes(src, EnvironmentEdgeManager.currentTime(), -1);
1733     return fs.rename(src, dest);
1734   }
1735 
1736   /**
   * Scans the root path of the file system to get the degree of locality for
   * each region on each of the servers having at least one block of that
   * region. This is used by the {@link RegionPlacementMaintainer} tool.
1741    *
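   * A minimal usage sketch (assumes {@code conf} points at the cluster's hbase.rootdir):
   * <pre>{@code
   * Map<String, Map<String, Float>> locality =
   *     FSUtils.getRegionDegreeLocalityMappingFromFS(conf);
   * // locality.get(encodedRegionName).get(serverName) is the fraction of the
   * // region's blocks stored on that server
   * }</pre>
   *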
1742    * @param conf
1743    *          the configuration to use
1744    * @return the mapping from region encoded name to a map of server names to
1745    *           locality fraction
1746    * @throws IOException
1747    *           in case of file system errors or interrupts
1748    */
1749   public static Map<String, Map<String, Float>> getRegionDegreeLocalityMappingFromFS(
1750       final Configuration conf) throws IOException {
1751     return getRegionDegreeLocalityMappingFromFS(
1752         conf, null,
1753         conf.getInt(THREAD_POOLSIZE, DEFAULT_THREAD_POOLSIZE));
1754 
1755   }
1756 
1757   /**
   * Scans the root path of the file system to get the degree of locality for
   * each region on each of the servers having at least one block of that
   * region.
1761    *
1762    * @param conf
1763    *          the configuration to use
1764    * @param desiredTable
1765    *          the table you wish to scan locality for
1766    * @param threadPoolSize
1767    *          the thread pool size to use
1768    * @return the mapping from region encoded name to a map of server names to
1769    *           locality fraction
1770    * @throws IOException
1771    *           in case of file system errors or interrupts
1772    */
1773   public static Map<String, Map<String, Float>> getRegionDegreeLocalityMappingFromFS(
1774       final Configuration conf, final String desiredTable, int threadPoolSize)
1775       throws IOException {
1776     Map<String, Map<String, Float>> regionDegreeLocalityMapping =
1777         new ConcurrentHashMap<String, Map<String, Float>>();
1778     getRegionLocalityMappingFromFS(conf, desiredTable, threadPoolSize, null,
1779         regionDegreeLocalityMapping);
1780     return regionDegreeLocalityMapping;
1781   }
1782 
1783   /**
   * Scans the root path of the file system to get either the mapping between the
   * region name and its best-locality region server, or the degree of locality of
   * each region on each of the servers having at least one block of that region.
   * The output map parameters are both optional.
1788    *
1789    * @param conf
1790    *          the configuration to use
1791    * @param desiredTable
1792    *          the table you wish to scan locality for
1793    * @param threadPoolSize
1794    *          the thread pool size to use
1795    * @param regionToBestLocalityRSMapping
1796    *          the map into which to put the best locality mapping or null
1797    * @param regionDegreeLocalityMapping
1798    *          the map into which to put the locality degree mapping or null,
1799    *          must be a thread-safe implementation
1800    * @throws IOException
1801    *           in case of file system errors or interrupts
1802    */
1803   private static void getRegionLocalityMappingFromFS(
1804       final Configuration conf, final String desiredTable,
1805       int threadPoolSize,
1806       Map<String, String> regionToBestLocalityRSMapping,
1807       Map<String, Map<String, Float>> regionDegreeLocalityMapping)
1808       throws IOException {
1809     FileSystem fs =  FileSystem.get(conf);
1810     Path rootPath = FSUtils.getRootDir(conf);
1811     long startTime = EnvironmentEdgeManager.currentTime();
1812     Path queryPath;
1813     // The table files are in ${hbase.rootdir}/data/<namespace>/<table>/*
1814     if (null == desiredTable) {
1815       queryPath = new Path(new Path(rootPath, HConstants.BASE_NAMESPACE_DIR).toString() + "/*/*/*/");
1816     } else {
1817       queryPath = new Path(FSUtils.getTableDir(rootPath, TableName.valueOf(desiredTable)).toString() + "/*/");
1818     }
1819 
1820     // reject all paths that are not appropriate
1821     PathFilter pathFilter = new PathFilter() {
1822       @Override
1823       public boolean accept(Path path) {
        // path.getName() is expected to be a region name, but the glob may also match noise
1825         if (null == path) {
1826           return false;
1827         }
1828 
1829         // no parent?
1830         Path parent = path.getParent();
1831         if (null == parent) {
1832           return false;
1833         }
1834 
1835         String regionName = path.getName();
1836         if (null == regionName) {
1837           return false;
1838         }
1839 
1840         if (!regionName.toLowerCase().matches("[0-9a-f]+")) {
1841           return false;
1842         }
1843         return true;
1844       }
1845     };
1846 
1847     FileStatus[] statusList = fs.globStatus(queryPath, pathFilter);
1848 
1849     if (null == statusList) {
1850       return;
1851     } else {
1852       LOG.debug("Query Path: " + queryPath + " ; # list of files: " +
1853           statusList.length);
1854     }
1855 
1856     // lower the number of threads in case we have very few expected regions
1857     threadPoolSize = Math.min(threadPoolSize, statusList.length);
1858 
1859     // run in multiple threads
1860     ThreadPoolExecutor tpe = new ThreadPoolExecutor(threadPoolSize,
1861         threadPoolSize, 60, TimeUnit.SECONDS,
1862         new ArrayBlockingQueue<Runnable>(statusList.length));
1863     try {
1864       // ignore all file status items that are not of interest
1865       for (FileStatus regionStatus : statusList) {
1866         if (null == regionStatus) {
1867           continue;
1868         }
1869 
1870         if (!regionStatus.isDirectory()) {
1871           continue;
1872         }
1873 
1874         Path regionPath = regionStatus.getPath();
1875         if (null == regionPath) {
1876           continue;
1877         }
1878 
1879         tpe.execute(new FSRegionScanner(fs, regionPath,
1880             regionToBestLocalityRSMapping, regionDegreeLocalityMapping));
1881       }
1882     } finally {
1883       tpe.shutdown();
1884       int threadWakeFrequency = conf.getInt(HConstants.THREAD_WAKE_FREQUENCY,
1885           60 * 1000);
1886       try {
1887         // here we wait until TPE terminates, which is either naturally or by
1888         // exceptions in the execution of the threads
1889         while (!tpe.awaitTermination(threadWakeFrequency,
1890             TimeUnit.MILLISECONDS)) {
1891           // printing out rough estimate, so as to not introduce
1892           // AtomicInteger
1893           LOG.info("Locality checking is underway: { Scanned Regions : "
1894               + tpe.getCompletedTaskCount() + "/"
1895               + tpe.getTaskCount() + " }");
1896         }
1897       } catch (InterruptedException e) {
1898         throw (InterruptedIOException)new InterruptedIOException().initCause(e);
1899       }
1900     }
1901 
1902     long overhead = EnvironmentEdgeManager.currentTime() - startTime;
    String overheadMsg = "Scanning DFS for locality info took " + overhead + " ms";
1904 
1905     LOG.info(overheadMsg);
1906   }
1907 
1908   /**
1909    * Do our short circuit read setup.
1910    * Checks buffer size to use and whether to do checksumming in hbase or hdfs.
1911    * @param conf
1912    */
1913   public static void setupShortCircuitRead(final Configuration conf) {
    // Warn if the user has set "dfs.client.read.shortcircuit.skip.checksum" to true.
1915     boolean shortCircuitSkipChecksum =
1916       conf.getBoolean("dfs.client.read.shortcircuit.skip.checksum", false);
1917     boolean useHBaseChecksum = conf.getBoolean(HConstants.HBASE_CHECKSUM_VERIFICATION, true);
1918     if (shortCircuitSkipChecksum) {
1919       LOG.warn("Configuration \"dfs.client.read.shortcircuit.skip.checksum\" should not " +
1920         "be set to true." + (useHBaseChecksum ? " HBase checksum doesn't require " +
1921         "it, see https://issues.apache.org/jira/browse/HBASE-6868." : ""));
1922       assert !shortCircuitSkipChecksum; //this will fail if assertions are on
1923     }
1924     checkShortCircuitReadBufferSize(conf);
1925   }
1926 
1927   /**
1928    * Check if short circuit read buffer size is set and if not, set it to hbase value.
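   * A minimal sketch of how the hbase-prefixed key feeds the dfs key (the 131072 value
   * below is illustrative):
   * <pre>{@code
   * conf.setInt("hbase.dfs.client.read.shortcircuit.buffer.size", 131072);
   * FSUtils.checkShortCircuitReadBufferSize(conf);
   * // "dfs.client.read.shortcircuit.buffer.size" is now 131072 if it was previously unset
   * }</pre>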
1929    * @param conf
1930    */
1931   public static void checkShortCircuitReadBufferSize(final Configuration conf) {
1932     final int defaultSize = HConstants.DEFAULT_BLOCKSIZE * 2;
1933     final int notSet = -1;
1934     // DFSConfigKeys.DFS_CLIENT_READ_SHORTCIRCUIT_BUFFER_SIZE_KEY is only defined in h2
1935     final String dfsKey = "dfs.client.read.shortcircuit.buffer.size";
1936     int size = conf.getInt(dfsKey, notSet);
1937     // If a size is set, return -- we will use it.
1938     if (size != notSet) return;
1939     // But short circuit buffer size is normally not set.  Put in place the hbase wanted size.
1940     int hbaseSize = conf.getInt("hbase." + dfsKey, defaultSize);
1941     conf.setIfUnset(dfsKey, Integer.toString(hbaseSize));
1942   }
1943 }