1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19 package org.apache.hadoop.hbase.util;
20
import java.io.ByteArrayInputStream;
import java.io.DataInputStream;
import java.io.EOFException;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.io.InputStream;
import java.io.InterruptedIOException;
import java.lang.reflect.InvocationTargetException;
import java.lang.reflect.Method;
import java.net.InetSocketAddress;
import java.net.URI;
import java.net.URISyntaxException;
import java.util.ArrayList;
import java.util.Collections;
import java.util.HashMap;
import java.util.LinkedList;
import java.util.List;
import java.util.Locale;
import java.util.Map;
import java.util.concurrent.ArrayBlockingQueue;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;
import java.util.regex.Pattern;
44
45 import org.apache.commons.logging.Log;
46 import org.apache.commons.logging.LogFactory;
47 import org.apache.hadoop.hbase.classification.InterfaceAudience;
48 import org.apache.hadoop.HadoopIllegalArgumentException;
49 import org.apache.hadoop.conf.Configuration;
50 import org.apache.hadoop.fs.BlockLocation;
51 import org.apache.hadoop.fs.FSDataInputStream;
52 import org.apache.hadoop.fs.FSDataOutputStream;
53 import org.apache.hadoop.fs.FileStatus;
54 import org.apache.hadoop.fs.FileSystem;
55 import org.apache.hadoop.fs.Path;
56 import org.apache.hadoop.fs.PathFilter;
57 import org.apache.hadoop.fs.permission.FsAction;
58 import org.apache.hadoop.fs.permission.FsPermission;
59 import org.apache.hadoop.hbase.ClusterId;
60 import org.apache.hadoop.hbase.HColumnDescriptor;
61 import org.apache.hadoop.hbase.HConstants;
62 import org.apache.hadoop.hbase.HDFSBlocksDistribution;
63 import org.apache.hadoop.hbase.HRegionInfo;
64 import org.apache.hadoop.hbase.RemoteExceptionHandler;
65 import org.apache.hadoop.hbase.TableName;
66 import org.apache.hadoop.hbase.exceptions.DeserializationException;
67 import org.apache.hadoop.hbase.fs.HFileSystem;
68 import org.apache.hadoop.hbase.master.HMaster;
69 import org.apache.hadoop.hbase.regionserver.StoreFileInfo;
70 import org.apache.hadoop.hbase.security.AccessDeniedException;
71 import org.apache.hadoop.hbase.util.HBaseFsck.ErrorReporter;
72 import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
73 import org.apache.hadoop.hbase.protobuf.generated.FSProtos;
74 import org.apache.hadoop.hbase.regionserver.HRegion;
75 import org.apache.hadoop.hdfs.DistributedFileSystem;
76 import org.apache.hadoop.io.IOUtils;
77 import org.apache.hadoop.io.SequenceFile;
78 import org.apache.hadoop.ipc.RemoteException;
79 import org.apache.hadoop.security.UserGroupInformation;
80 import org.apache.hadoop.util.Progressable;
81 import org.apache.hadoop.util.ReflectionUtils;
82 import org.apache.hadoop.util.StringUtils;
83
84 import com.google.common.primitives.Ints;
85
86
87
88
89 @InterfaceAudience.Private
90 public abstract class FSUtils {
91 private static final Log LOG = LogFactory.getLog(FSUtils.class);
92
93
94 public static final String FULL_RWX_PERMISSIONS = "777";
95 private static final String THREAD_POOLSIZE = "hbase.client.localityCheck.threadPoolSize";
96 private static final int DEFAULT_THREAD_POOLSIZE = 2;
97
98
99 public static final boolean WINDOWS = System.getProperty("os.name").startsWith("Windows");
100
101 protected FSUtils() {
102 super();
103 }
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121 public static void setStoragePolicy(final FileSystem fs, final Configuration conf,
122 final Path path, final String policyKey, final String defaultPolicy) {
123 String storagePolicy = conf.get(policyKey, defaultPolicy).toUpperCase();
124 if (storagePolicy.equals(defaultPolicy)) {
125 if (LOG.isTraceEnabled()) {
126 LOG.trace("default policy of " + defaultPolicy + " requested, exiting early.");
127 }
128 return;
129 }
130 if (fs instanceof DistributedFileSystem) {
131 DistributedFileSystem dfs = (DistributedFileSystem)fs;
132
133 Class<? extends DistributedFileSystem> dfsClass = dfs.getClass();
134 Method m = null;
135 try {
136 m = dfsClass.getDeclaredMethod("setStoragePolicy",
137 new Class<?>[] { Path.class, String.class });
138 m.setAccessible(true);
139 } catch (NoSuchMethodException e) {
140 LOG.info("FileSystem doesn't support"
141 + " setStoragePolicy; --HDFS-6584 not available");
142 } catch (SecurityException e) {
143 LOG.info("Doesn't have access to setStoragePolicy on "
144 + "FileSystems --HDFS-6584 not available", e);
145 m = null;
146 }
147 if (m != null) {
148 try {
149 m.invoke(dfs, path, storagePolicy);
150 LOG.info("set " + storagePolicy + " for " + path);
151 } catch (Exception e) {
152
153 boolean probablyBadPolicy = false;
154 if (e instanceof InvocationTargetException) {
155 final Throwable exception = e.getCause();
156 if (exception instanceof RemoteException &&
157 HadoopIllegalArgumentException.class.getName().equals(
158 ((RemoteException)exception).getClassName())) {
159 LOG.warn("Given storage policy, '" + storagePolicy + "', was rejected and probably " +
160 "isn't a valid policy for the version of Hadoop you're running. I.e. if you're " +
161 "trying to use SSD related policies then you're likely missing HDFS-7228. For " +
162 "more information see the 'ArchivalStorage' docs for your Hadoop release.");
163 LOG.debug("More information about the invalid storage policy.", exception);
164 probablyBadPolicy = true;
165 }
166 }
167 if (!probablyBadPolicy) {
168
169
170 LOG.warn("Unable to set " + storagePolicy + " for " + path, e);
171 }
172 }
173 }
174 } else {
175 LOG.info("FileSystem isn't an instance of DistributedFileSystem; presuming it doesn't " +
176 "support setStoragePolicy.");
177 }
178 }
179
180
181
182
183
184
185
186
187
188 public static boolean isStartingWithPath(final Path rootPath, final String path) {
189 String uriRootPath = rootPath.toUri().getPath();
190 String tailUriPath = (new Path(path)).toUri().getPath();
191 return tailUriPath.startsWith(uriRootPath);
192 }
193
194
195
196
197
198
199
200
201
202 public static boolean isMatchingTail(final Path pathToSearch, String pathTail) {
203 return isMatchingTail(pathToSearch, new Path(pathTail));
204 }
205
206
207
208
209
210
211
212
213
214 public static boolean isMatchingTail(final Path pathToSearch, final Path pathTail) {
215 if (pathToSearch.depth() != pathTail.depth()) return false;
216 Path tailPath = pathTail;
217 String tailName;
218 Path toSearch = pathToSearch;
219 String toSearchName;
220 boolean result = false;
221 do {
222 tailName = tailPath.getName();
223 if (tailName == null || tailName.length() <= 0) {
224 result = true;
225 break;
226 }
227 toSearchName = toSearch.getName();
228 if (toSearchName == null || toSearchName.length() <= 0) break;
229
230 tailPath = tailPath.getParent();
231 toSearch = toSearch.getParent();
232 } while(tailName.equals(toSearchName));
233 return result;
234 }
235
236 public static FSUtils getInstance(FileSystem fs, Configuration conf) {
237 String scheme = fs.getUri().getScheme();
238 if (scheme == null) {
239 LOG.warn("Could not find scheme for uri " +
240 fs.getUri() + ", default to hdfs");
241 scheme = "hdfs";
242 }
243 Class<?> fsUtilsClass = conf.getClass("hbase.fsutil." +
244 scheme + ".impl", FSHDFSUtils.class);
245 FSUtils fsUtils = (FSUtils)ReflectionUtils.newInstance(fsUtilsClass, conf);
246 return fsUtils;
247 }
248
249
250
251
252
253
254
255
256 public static boolean deleteDirectory(final FileSystem fs, final Path dir)
257 throws IOException {
258 return fs.exists(dir) && fs.delete(dir, true);
259 }
260
261
262
263
264
265
266
267
268 public static boolean deleteRegionDir(final Configuration conf, final HRegionInfo hri)
269 throws IOException {
270 Path rootDir = getRootDir(conf);
271 FileSystem fs = rootDir.getFileSystem(conf);
272 return deleteDirectory(fs,
273 new Path(getTableDir(rootDir, hri.getTable()), hri.getEncodedName()));
274 }
275
276
277
278
279
280
281
282
283
284
285
286
287 public static long getDefaultBlockSize(final FileSystem fs, final Path path) throws IOException {
288 Method m = null;
289 Class<? extends FileSystem> cls = fs.getClass();
290 try {
291 m = cls.getMethod("getDefaultBlockSize", new Class<?>[] { Path.class });
292 } catch (NoSuchMethodException e) {
293 LOG.info("FileSystem doesn't support getDefaultBlockSize");
294 } catch (SecurityException e) {
295 LOG.info("Doesn't have access to getDefaultBlockSize on FileSystems", e);
296 m = null;
297 }
298 if (m == null) {
299 return fs.getDefaultBlockSize(path);
300 } else {
301 try {
302 Object ret = m.invoke(fs, path);
303 return ((Long)ret).longValue();
304 } catch (Exception e) {
305 throw new IOException(e);
306 }
307 }
308 }
309
310
311
312
313
314
315
316
317
318
319
320
321 public static short getDefaultReplication(final FileSystem fs, final Path path) throws IOException {
322 Method m = null;
323 Class<? extends FileSystem> cls = fs.getClass();
324 try {
325 m = cls.getMethod("getDefaultReplication", new Class<?>[] { Path.class });
326 } catch (NoSuchMethodException e) {
327 LOG.info("FileSystem doesn't support getDefaultReplication");
328 } catch (SecurityException e) {
329 LOG.info("Doesn't have access to getDefaultReplication on FileSystems", e);
330 m = null;
331 }
332 if (m == null) {
333 return fs.getDefaultReplication(path);
334 } else {
335 try {
336 Object ret = m.invoke(fs, path);
337 return ((Number)ret).shortValue();
338 } catch (Exception e) {
339 throw new IOException(e);
340 }
341 }
342 }
343
344
345
346
347
348
349
350
351
352
353
354 public static int getDefaultBufferSize(final FileSystem fs) {
355 return fs.getConf().getInt("io.file.buffer.size", 4096);
356 }
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377 public static FSDataOutputStream create(Configuration conf, FileSystem fs, Path path,
378 FsPermission perm, InetSocketAddress[] favoredNodes) throws IOException {
379 if (fs instanceof HFileSystem) {
380 FileSystem backingFs = ((HFileSystem)fs).getBackingFs();
381 if (backingFs instanceof DistributedFileSystem) {
382
383
384 short replication = Short.parseShort(conf.get(HColumnDescriptor.DFS_REPLICATION,
385 String.valueOf(HColumnDescriptor.DEFAULT_DFS_REPLICATION)));
386 try {
387 return (FSDataOutputStream) (DistributedFileSystem.class.getDeclaredMethod("create",
388 Path.class, FsPermission.class, boolean.class, int.class, short.class, long.class,
389 Progressable.class, InetSocketAddress[].class).invoke(backingFs, path, perm, true,
390 getDefaultBufferSize(backingFs),
391 replication > 0 ? replication : getDefaultReplication(backingFs, path),
392 getDefaultBlockSize(backingFs, path), null, favoredNodes));
393 } catch (InvocationTargetException ite) {
394
395 throw new IOException(ite.getCause());
396 } catch (NoSuchMethodException e) {
397 LOG.debug("DFS Client does not support most favored nodes create; using default create");
398 if (LOG.isTraceEnabled()) LOG.trace("Ignoring; use default create", e);
399 } catch (IllegalArgumentException e) {
400 LOG.debug("Ignoring (most likely Reflection related exception) " + e);
401 } catch (SecurityException e) {
402 LOG.debug("Ignoring (most likely Reflection related exception) " + e);
403 } catch (IllegalAccessException e) {
404 LOG.debug("Ignoring (most likely Reflection related exception) " + e);
405 }
406 }
407 }
408 return create(fs, path, perm, true);
409 }
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428 public static FSDataOutputStream create(FileSystem fs, Path path,
429 FsPermission perm, boolean overwrite) throws IOException {
430 if (LOG.isTraceEnabled()) {
431 LOG.trace("Creating file=" + path + " with permission=" + perm + ", overwrite=" + overwrite);
432 }
433 return fs.create(path, perm, overwrite, getDefaultBufferSize(fs),
434 getDefaultReplication(fs, path), getDefaultBlockSize(fs, path), null);
435 }
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450 public static FsPermission getFilePermissions(final FileSystem fs,
451 final Configuration conf, final String permssionConfKey) {
452 boolean enablePermissions = conf.getBoolean(
453 HConstants.ENABLE_DATA_FILE_UMASK, false);
454
455 if (enablePermissions) {
456 try {
457 FsPermission perm = new FsPermission(FULL_RWX_PERMISSIONS);
458
459 String mask = conf.get(permssionConfKey);
460 if (mask == null)
461 return FsPermission.getFileDefault();
462
463 FsPermission umask = new FsPermission(mask);
464 return perm.applyUMask(umask);
465 } catch (IllegalArgumentException e) {
466 LOG.warn(
467 "Incorrect umask attempted to be created: "
468 + conf.get(permssionConfKey)
469 + ", using default file permissions.", e);
470 return FsPermission.getFileDefault();
471 }
472 }
473 return FsPermission.getFileDefault();
474 }
475
476
477
478
479
480
481
482 public static void checkFileSystemAvailable(final FileSystem fs)
483 throws IOException {
484 if (!(fs instanceof DistributedFileSystem)) {
485 return;
486 }
487 IOException exception = null;
488 DistributedFileSystem dfs = (DistributedFileSystem) fs;
489 try {
490 if (dfs.exists(new Path("/"))) {
491 return;
492 }
493 } catch (IOException e) {
494 exception = RemoteExceptionHandler.checkIOException(e);
495 }
496 try {
497 fs.close();
498 } catch (Exception e) {
499 LOG.error("file system close failed: ", e);
500 }
501 IOException io = new IOException("File system is not available");
502 io.initCause(exception);
503 throw io;
504 }
505
506
507
508
509
510
511
512
513
514 private static boolean isInSafeMode(DistributedFileSystem dfs) throws IOException {
515 boolean inSafeMode = false;
516 try {
517 Method m = DistributedFileSystem.class.getMethod("setSafeMode", new Class<?> []{
518 org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction.class, boolean.class});
519 inSafeMode = (Boolean) m.invoke(dfs,
520 org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction.SAFEMODE_GET, true);
521 } catch (Exception e) {
522 if (e instanceof IOException) throw (IOException) e;
523
524
525 inSafeMode = dfs.setSafeMode(
526 org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction.SAFEMODE_GET);
527 }
528 return inSafeMode;
529 }
530
531
532
533
534
535
536 public static void checkDfsSafeMode(final Configuration conf)
537 throws IOException {
538 boolean isInSafeMode = false;
539 FileSystem fs = FileSystem.get(conf);
540 if (fs instanceof DistributedFileSystem) {
541 DistributedFileSystem dfs = (DistributedFileSystem)fs;
542 isInSafeMode = isInSafeMode(dfs);
543 }
544 if (isInSafeMode) {
545 throw new IOException("File system is in safemode, it can't be written now");
546 }
547 }
548
549
550
551
552
553
554
555
556
557
558 public static String getVersion(FileSystem fs, Path rootdir)
559 throws IOException, DeserializationException {
560 Path versionFile = new Path(rootdir, HConstants.VERSION_FILE_NAME);
561 FileStatus[] status = null;
562 try {
563
564
565 status = fs.listStatus(versionFile);
566 } catch (FileNotFoundException fnfe) {
567 return null;
568 }
569 if (status == null || status.length == 0) return null;
570 String version = null;
571 byte [] content = new byte [(int)status[0].getLen()];
572 FSDataInputStream s = fs.open(versionFile);
573 try {
574 IOUtils.readFully(s, content, 0, content.length);
575 if (ProtobufUtil.isPBMagicPrefix(content)) {
576 version = parseVersionFrom(content);
577 } else {
578
579 InputStream is = new ByteArrayInputStream(content);
580 DataInputStream dis = new DataInputStream(is);
581 try {
582 version = dis.readUTF();
583 } finally {
584 dis.close();
585 }
586 }
587 } catch (EOFException eof) {
588 LOG.warn("Version file was empty, odd, will try to set it.");
589 } finally {
590 s.close();
591 }
592 return version;
593 }
594
595
596
597
598
599
600
601 static String parseVersionFrom(final byte [] bytes)
602 throws DeserializationException {
603 ProtobufUtil.expectPBMagicPrefix(bytes);
604 int pblen = ProtobufUtil.lengthOfPBMagic();
605 FSProtos.HBaseVersionFileContent.Builder builder =
606 FSProtos.HBaseVersionFileContent.newBuilder();
607 try {
608 ProtobufUtil.mergeFrom(builder, bytes, pblen, bytes.length - pblen);
609 return builder.getVersion();
610 } catch (IOException e) {
611
612 throw new DeserializationException(e);
613 }
614 }
615
616
617
618
619
620
621 static byte [] toVersionByteArray(final String version) {
622 FSProtos.HBaseVersionFileContent.Builder builder =
623 FSProtos.HBaseVersionFileContent.newBuilder();
624 return ProtobufUtil.prependPBMagic(builder.setVersion(version).build().toByteArray());
625 }
626
627
628
629
630
631
632
633
634
635
636
637 public static void checkVersion(FileSystem fs, Path rootdir, boolean message)
638 throws IOException, DeserializationException {
639 checkVersion(fs, rootdir, message, 0, HConstants.DEFAULT_VERSION_FILE_WRITE_ATTEMPTS);
640 }
641
642
643
644
645
646
647
648
649
650
651
652
653
654 public static void checkVersion(FileSystem fs, Path rootdir,
655 boolean message, int wait, int retries)
656 throws IOException, DeserializationException {
657 String version = getVersion(fs, rootdir);
658 if (version == null) {
659 if (!metaRegionExists(fs, rootdir)) {
660
661
662 setVersion(fs, rootdir, wait, retries);
663 return;
664 }
665 } else if (version.compareTo(HConstants.FILE_SYSTEM_VERSION) == 0) return;
666
667
668
669 String msg = "HBase file layout needs to be upgraded."
670 + " You have version " + version
671 + " and I want version " + HConstants.FILE_SYSTEM_VERSION
672 + ". Consult http://hbase.apache.org/book.html for further information about upgrading HBase."
673 + " Is your hbase.rootdir valid? If so, you may need to run "
674 + "'hbase hbck -fixVersionFile'.";
675 if (message) {
676 System.out.println("WARNING! " + msg);
677 }
678 throw new FileSystemVersionException(msg);
679 }
680
681
682
683
684
685
686
687
688 public static void setVersion(FileSystem fs, Path rootdir)
689 throws IOException {
690 setVersion(fs, rootdir, HConstants.FILE_SYSTEM_VERSION, 0,
691 HConstants.DEFAULT_VERSION_FILE_WRITE_ATTEMPTS);
692 }
693
694
695
696
697
698
699
700
701
702
703 public static void setVersion(FileSystem fs, Path rootdir, int wait, int retries)
704 throws IOException {
705 setVersion(fs, rootdir, HConstants.FILE_SYSTEM_VERSION, wait, retries);
706 }
707
708
709
710
711
712
713
714
715
716
717
718
719 public static void setVersion(FileSystem fs, Path rootdir, String version,
720 int wait, int retries) throws IOException {
721 Path versionFile = new Path(rootdir, HConstants.VERSION_FILE_NAME);
722 Path tempVersionFile = new Path(rootdir, HConstants.HBASE_TEMP_DIRECTORY + Path.SEPARATOR +
723 HConstants.VERSION_FILE_NAME);
724 while (true) {
725 try {
726
727 FSDataOutputStream s = fs.create(tempVersionFile);
728 try {
729 s.write(toVersionByteArray(version));
730 s.close();
731 s = null;
732
733
734 if (!fs.rename(tempVersionFile, versionFile)) {
735 throw new IOException("Unable to move temp version file to " + versionFile);
736 }
737 } finally {
738
739
740
741
742
743 try {
744 if (s != null) s.close();
745 } catch (IOException ignore) { }
746 }
747 LOG.info("Created version file at " + rootdir.toString() + " with version=" + version);
748 return;
749 } catch (IOException e) {
750 if (retries > 0) {
751 LOG.debug("Unable to create version file at " + rootdir.toString() + ", retrying", e);
752 fs.delete(versionFile, false);
753 try {
754 if (wait > 0) {
755 Thread.sleep(wait);
756 }
757 } catch (InterruptedException ie) {
758 throw (InterruptedIOException)new InterruptedIOException().initCause(ie);
759 }
760 retries--;
761 } else {
762 throw e;
763 }
764 }
765 }
766 }
767
768
769
770
771
772
773
774
775
776 public static boolean checkClusterIdExists(FileSystem fs, Path rootdir,
777 int wait) throws IOException {
778 while (true) {
779 try {
780 Path filePath = new Path(rootdir, HConstants.CLUSTER_ID_FILE_NAME);
781 return fs.exists(filePath);
782 } catch (IOException ioe) {
783 if (wait > 0) {
784 LOG.warn("Unable to check cluster ID file in " + rootdir.toString() +
785 ", retrying in "+wait+"msec: "+StringUtils.stringifyException(ioe));
786 try {
787 Thread.sleep(wait);
788 } catch (InterruptedException e) {
789 throw (InterruptedIOException)new InterruptedIOException().initCause(e);
790 }
791 } else {
792 throw ioe;
793 }
794 }
795 }
796 }
797
798
799
800
801
802
803
804
805 public static ClusterId getClusterId(FileSystem fs, Path rootdir)
806 throws IOException {
807 Path idPath = new Path(rootdir, HConstants.CLUSTER_ID_FILE_NAME);
808 ClusterId clusterId = null;
809 FileStatus status = fs.exists(idPath)? fs.getFileStatus(idPath): null;
810 if (status != null) {
811 int len = Ints.checkedCast(status.getLen());
812 byte [] content = new byte[len];
813 FSDataInputStream in = fs.open(idPath);
814 try {
815 in.readFully(content);
816 } catch (EOFException eof) {
817 LOG.warn("Cluster ID file " + idPath.toString() + " was empty");
818 } finally{
819 in.close();
820 }
821 try {
822 clusterId = ClusterId.parseFrom(content);
823 } catch (DeserializationException e) {
824 throw new IOException("content=" + Bytes.toString(content), e);
825 }
826
827 if (!ProtobufUtil.isPBMagicPrefix(content)) {
828 String cid = null;
829 in = fs.open(idPath);
830 try {
831 cid = in.readUTF();
832 clusterId = new ClusterId(cid);
833 } catch (EOFException eof) {
834 LOG.warn("Cluster ID file " + idPath.toString() + " was empty");
835 } finally {
836 in.close();
837 }
838 rewriteAsPb(fs, rootdir, idPath, clusterId);
839 }
840 return clusterId;
841 } else {
842 LOG.warn("Cluster ID file does not exist at " + idPath.toString());
843 }
844 return clusterId;
845 }
846
847
848
849
850
851 private static void rewriteAsPb(final FileSystem fs, final Path rootdir, final Path p,
852 final ClusterId cid)
853 throws IOException {
854
855
856 Path movedAsideName = new Path(p + "." + System.currentTimeMillis());
857 if (!fs.rename(p, movedAsideName)) throw new IOException("Failed rename of " + p);
858 setClusterId(fs, rootdir, cid, 100);
859 if (!fs.delete(movedAsideName, false)) {
860 throw new IOException("Failed delete of " + movedAsideName);
861 }
862 LOG.debug("Rewrote the hbase.id file as pb");
863 }
864
865
866
867
868
869
870
871
872
873
874 public static void setClusterId(FileSystem fs, Path rootdir, ClusterId clusterId,
875 int wait) throws IOException {
876 while (true) {
877 try {
878 Path idFile = new Path(rootdir, HConstants.CLUSTER_ID_FILE_NAME);
879 Path tempIdFile = new Path(rootdir, HConstants.HBASE_TEMP_DIRECTORY +
880 Path.SEPARATOR + HConstants.CLUSTER_ID_FILE_NAME);
881
882 FSDataOutputStream s = fs.create(tempIdFile);
883 try {
884 s.write(clusterId.toByteArray());
885 s.close();
886 s = null;
887
888
889 if (!fs.rename(tempIdFile, idFile)) {
890 throw new IOException("Unable to move temp version file to " + idFile);
891 }
892 } finally {
893
894 try {
895 if (s != null) s.close();
896 } catch (IOException ignore) { }
897 }
898 if (LOG.isDebugEnabled()) {
899 LOG.debug("Created cluster ID file at " + idFile.toString() + " with ID: " + clusterId);
900 }
901 return;
902 } catch (IOException ioe) {
903 if (wait > 0) {
904 LOG.warn("Unable to create cluster ID file in " + rootdir.toString() +
905 ", retrying in " + wait + "msec: " + StringUtils.stringifyException(ioe));
906 try {
907 Thread.sleep(wait);
908 } catch (InterruptedException e) {
909 throw (InterruptedIOException)new InterruptedIOException().initCause(e);
910 }
911 } else {
912 throw ioe;
913 }
914 }
915 }
916 }
917
918
919
920
921
922
923
924
925 public static Path validateRootPath(Path root) throws IOException {
926 try {
927 URI rootURI = new URI(root.toString());
928 String scheme = rootURI.getScheme();
929 if (scheme == null) {
930 throw new IOException("Root directory does not have a scheme");
931 }
932 return root;
933 } catch (URISyntaxException e) {
934 IOException io = new IOException("Root directory path is not a valid " +
935 "URI -- check your " + HConstants.HBASE_DIR + " configuration");
936 io.initCause(e);
937 throw io;
938 }
939 }
940
941
942
943
944
945
946
947
948
949 public static String removeRootPath(Path path, final Configuration conf) throws IOException {
950 Path root = FSUtils.getRootDir(conf);
951 String pathStr = path.toString();
952
953 if (!pathStr.startsWith(root.toString())) return pathStr;
954
955 return pathStr.substring(root.toString().length() + 1);
956 }
957
958
959
960
961
962
963
964 public static void waitOnSafeMode(final Configuration conf,
965 final long wait)
966 throws IOException {
967 FileSystem fs = FileSystem.get(conf);
968 if (!(fs instanceof DistributedFileSystem)) return;
969 DistributedFileSystem dfs = (DistributedFileSystem)fs;
970
971 while (isInSafeMode(dfs)) {
972 LOG.info("Waiting for dfs to exit safe mode...");
973 try {
974 Thread.sleep(wait);
975 } catch (InterruptedException e) {
976 throw (InterruptedIOException)new InterruptedIOException().initCause(e);
977 }
978 }
979 }
980
981
982
983
984
985
986
987
988
989
990
991 public static String getPath(Path p) {
992 return p.toUri().getPath();
993 }
994
995
996
997
998
999
1000
1001 public static Path getRootDir(final Configuration c) throws IOException {
1002 Path p = new Path(c.get(HConstants.HBASE_DIR));
1003 FileSystem fs = p.getFileSystem(c);
1004 return p.makeQualified(fs);
1005 }
1006
1007 public static void setRootDir(final Configuration c, final Path root) throws IOException {
1008 c.set(HConstants.HBASE_DIR, root.toString());
1009 }
1010
1011 public static void setFsDefault(final Configuration c, final Path root) throws IOException {
1012 c.set("fs.defaultFS", root.toString());
1013 }
1014
1015
1016
1017
1018
1019
1020
1021
1022
1023 @SuppressWarnings("deprecation")
1024 public static boolean metaRegionExists(FileSystem fs, Path rootdir)
1025 throws IOException {
1026 Path metaRegionDir =
1027 HRegion.getRegionDir(rootdir, HRegionInfo.FIRST_META_REGIONINFO);
1028 return fs.exists(metaRegionDir);
1029 }
1030
1031
1032
1033
1034
1035
1036
1037
1038
1039 static public HDFSBlocksDistribution computeHDFSBlocksDistribution(
1040 final FileSystem fs, FileStatus status, long start, long length)
1041 throws IOException {
1042 HDFSBlocksDistribution blocksDistribution = new HDFSBlocksDistribution();
1043 BlockLocation [] blockLocations =
1044 fs.getFileBlockLocations(status, start, length);
1045 for(BlockLocation bl : blockLocations) {
1046 String [] hosts = bl.getHosts();
1047 long len = bl.getLength();
1048 blocksDistribution.addHostsAndBlockWeight(hosts, len);
1049 }
1050
1051 return blocksDistribution;
1052 }
1053
1054
1055
1056
1057
1058
1059
1060
1061
1062
1063
1064
1065 public static boolean isMajorCompacted(final FileSystem fs,
1066 final Path hbaseRootDir)
1067 throws IOException {
1068 List<Path> tableDirs = getTableDirs(fs, hbaseRootDir);
1069 PathFilter regionFilter = new RegionDirFilter(fs);
1070 PathFilter familyFilter = new FamilyDirFilter(fs);
1071 for (Path d : tableDirs) {
1072 FileStatus[] regionDirs = fs.listStatus(d, regionFilter);
1073 for (FileStatus regionDir : regionDirs) {
1074 Path dd = regionDir.getPath();
1075
1076 FileStatus[] familyDirs = fs.listStatus(dd, familyFilter);
1077 for (FileStatus familyDir : familyDirs) {
1078 Path family = familyDir.getPath();
1079
1080 FileStatus[] familyStatus = fs.listStatus(family);
1081 if (familyStatus.length > 1) {
1082 LOG.debug(family.toString() + " has " + familyStatus.length +
1083 " files.");
1084 return false;
1085 }
1086 }
1087 }
1088 }
1089 return true;
1090 }
1091
1092
1093
1094
1095
1096
1097
1098
1099
1100
1101 public static int getTotalTableFragmentation(final HMaster master)
1102 throws IOException {
1103 Map<String, Integer> map = getTableFragmentation(master);
1104 return map != null && map.size() > 0 ? map.get("-TOTAL-") : -1;
1105 }
1106
1107
1108
1109
1110
1111
1112
1113
1114
1115
1116
1117 public static Map<String, Integer> getTableFragmentation(
1118 final HMaster master)
1119 throws IOException {
1120 Path path = getRootDir(master.getConfiguration());
1121
1122 FileSystem fs = path.getFileSystem(master.getConfiguration());
1123 return getTableFragmentation(fs, path);
1124 }
1125
1126
1127
1128
1129
1130
1131
1132
1133
1134
1135
1136 public static Map<String, Integer> getTableFragmentation(
1137 final FileSystem fs, final Path hbaseRootDir)
1138 throws IOException {
1139 Map<String, Integer> frags = new HashMap<String, Integer>();
1140 int cfCountTotal = 0;
1141 int cfFragTotal = 0;
1142 PathFilter regionFilter = new RegionDirFilter(fs);
1143 PathFilter familyFilter = new FamilyDirFilter(fs);
1144 List<Path> tableDirs = getTableDirs(fs, hbaseRootDir);
1145 for (Path d : tableDirs) {
1146 int cfCount = 0;
1147 int cfFrag = 0;
1148 FileStatus[] regionDirs = fs.listStatus(d, regionFilter);
1149 for (FileStatus regionDir : regionDirs) {
1150 Path dd = regionDir.getPath();
1151
1152 FileStatus[] familyDirs = fs.listStatus(dd, familyFilter);
1153 for (FileStatus familyDir : familyDirs) {
1154 cfCount++;
1155 cfCountTotal++;
1156 Path family = familyDir.getPath();
1157
1158 FileStatus[] familyStatus = fs.listStatus(family);
1159 if (familyStatus.length > 1) {
1160 cfFrag++;
1161 cfFragTotal++;
1162 }
1163 }
1164 }
1165
1166 frags.put(FSUtils.getTableName(d).getNameAsString(),
1167 cfCount == 0? 0: Math.round((float) cfFrag / cfCount * 100));
1168 }
1169
1170 frags.put("-TOTAL-",
1171 cfCountTotal == 0? 0: Math.round((float) cfFragTotal / cfCountTotal * 100));
1172 return frags;
1173 }
1174
1175
1176
1177
1178
1179
1180
1181
1182
1183 public static Path getTableDir(Path rootdir, final TableName tableName) {
1184 return new Path(getNamespaceDir(rootdir, tableName.getNamespaceAsString()),
1185 tableName.getQualifierAsString());
1186 }
1187
1188
1189
1190
1191
1192
1193
1194
1195
1196 public static TableName getTableName(Path tablePath) {
1197 return TableName.valueOf(tablePath.getParent().getName(), tablePath.getName());
1198 }
1199
1200
1201
1202
1203
1204
1205
1206
1207
1208 public static Path getNamespaceDir(Path rootdir, final String namespace) {
1209 return new Path(rootdir, new Path(HConstants.BASE_NAMESPACE_DIR,
1210 new Path(namespace)));
1211 }
1212
1213
1214
1215
1216 static class FileFilter implements PathFilter {
1217 private final FileSystem fs;
1218
1219 public FileFilter(final FileSystem fs) {
1220 this.fs = fs;
1221 }
1222
1223 @Override
1224 public boolean accept(Path p) {
1225 try {
1226 return fs.isFile(p);
1227 } catch (IOException e) {
1228 LOG.debug("unable to verify if path=" + p + " is a regular file", e);
1229 return false;
1230 }
1231 }
1232 }
1233
1234
1235
1236
1237 public static class BlackListDirFilter implements PathFilter {
1238 private final FileSystem fs;
1239 private List<String> blacklist;
1240
1241
1242
1243
1244
1245
1246
1247 @SuppressWarnings("unchecked")
1248 public BlackListDirFilter(final FileSystem fs, final List<String> directoryNameBlackList) {
1249 this.fs = fs;
1250 blacklist =
1251 (List<String>) (directoryNameBlackList == null ? Collections.emptyList()
1252 : directoryNameBlackList);
1253 }
1254
1255 @Override
1256 public boolean accept(Path p) {
1257 boolean isValid = false;
1258 try {
1259 if (isValidName(p.getName())) {
1260 isValid = fs.getFileStatus(p).isDirectory();
1261 } else {
1262 isValid = false;
1263 }
1264 } catch (IOException e) {
1265 LOG.warn("An error occurred while verifying if [" + p.toString()
1266 + "] is a valid directory. Returning 'not valid' and continuing.", e);
1267 }
1268 return isValid;
1269 }
1270
1271 protected boolean isValidName(final String name) {
1272 return !blacklist.contains(name);
1273 }
1274 }
1275
1276
1277
1278
1279 public static class DirFilter extends BlackListDirFilter {
1280
1281 public DirFilter(FileSystem fs) {
1282 super(fs, null);
1283 }
1284 }
1285
1286
1287
1288
1289
1290 public static class UserTableDirFilter extends BlackListDirFilter {
1291 public UserTableDirFilter(FileSystem fs) {
1292 super(fs, HConstants.HBASE_NON_TABLE_DIRS);
1293 }
1294
1295 protected boolean isValidName(final String name) {
1296 if (!super.isValidName(name))
1297 return false;
1298
1299 try {
1300 TableName.isLegalTableQualifierName(Bytes.toBytes(name));
1301 } catch (IllegalArgumentException e) {
1302 LOG.info("INVALID NAME " + name);
1303 return false;
1304 }
1305 return true;
1306 }
1307 }
1308
1309
1310
1311
1312
1313
1314
1315
1316 public static boolean isAppendSupported(final Configuration conf) {
1317 boolean append = conf.getBoolean("dfs.support.append", false);
1318 if (append) {
1319 try {
1320
1321
1322
1323 SequenceFile.Writer.class.getMethod("syncFs", new Class<?> []{});
1324 append = true;
1325 } catch (SecurityException e) {
1326 } catch (NoSuchMethodException e) {
1327 append = false;
1328 }
1329 }
1330 if (!append) {
1331
1332 try {
1333 FSDataOutputStream.class.getMethod("hflush", new Class<?> []{});
1334 append = true;
1335 } catch (NoSuchMethodException e) {
1336 append = false;
1337 }
1338 }
1339 return append;
1340 }
1341
1342
1343
1344
1345
1346
1347 public static boolean isHDFS(final Configuration conf) throws IOException {
1348 FileSystem fs = FileSystem.get(conf);
1349 String scheme = fs.getUri().getScheme();
1350 return scheme.equalsIgnoreCase("hdfs");
1351 }
1352
1353
1354
1355
1356
1357
1358
1359
1360
/**
 * Recover the lease on file {@code p} so it can be safely opened for append or
 * read. Implementations are Hadoop-version specific (hence abstract here).
 *
 * @param fs filesystem holding the file
 * @param p path whose lease should be recovered
 * @param conf configuration to use
 * @param reporter used to report progress during (possibly long) recovery; may block
 * @throws IOException if lease recovery fails
 */
public abstract void recoverFileLease(final FileSystem fs, final Path p,
    Configuration conf, CancelableProgressable reporter) throws IOException;
1363
1364 public static List<Path> getTableDirs(final FileSystem fs, final Path rootdir)
1365 throws IOException {
1366 List<Path> tableDirs = new LinkedList<Path>();
1367
1368 for(FileStatus status :
1369 fs.globStatus(new Path(rootdir,
1370 new Path(HConstants.BASE_NAMESPACE_DIR, "*")))) {
1371 tableDirs.addAll(FSUtils.getLocalTableDirs(fs, status.getPath()));
1372 }
1373 return tableDirs;
1374 }
1375
1376
1377
1378
1379
1380
1381
1382
1383 public static List<Path> getLocalTableDirs(final FileSystem fs, final Path rootdir)
1384 throws IOException {
1385
1386 FileStatus[] dirs = fs.listStatus(rootdir, new UserTableDirFilter(fs));
1387 List<Path> tabledirs = new ArrayList<Path>(dirs.length);
1388 for (FileStatus dir: dirs) {
1389 tabledirs.add(dir.getPath());
1390 }
1391 return tabledirs;
1392 }
1393
1394
1395
1396
1397
1398
1399 public static boolean isRecoveredEdits(Path path) {
1400 return path.toString().contains(HConstants.RECOVERED_EDITS_DIR);
1401 }
1402
1403
1404
1405
1406 public static class RegionDirFilter implements PathFilter {
1407
1408 final public static Pattern regionDirPattern = Pattern.compile("^[0-9a-f]*$");
1409 final FileSystem fs;
1410
1411 public RegionDirFilter(FileSystem fs) {
1412 this.fs = fs;
1413 }
1414
1415 @Override
1416 public boolean accept(Path rd) {
1417 if (!regionDirPattern.matcher(rd.getName()).matches()) {
1418 return false;
1419 }
1420
1421 try {
1422 return fs.getFileStatus(rd).isDirectory();
1423 } catch (IOException ioe) {
1424
1425 LOG.warn("Skipping file " + rd +" due to IOException", ioe);
1426 return false;
1427 }
1428 }
1429 }
1430
1431
1432
1433
1434
1435
1436
1437
1438
1439 public static List<Path> getRegionDirs(final FileSystem fs, final Path tableDir) throws IOException {
1440
1441 FileStatus[] rds = fs.listStatus(tableDir, new RegionDirFilter(fs));
1442 List<Path> regionDirs = new ArrayList<Path>(rds.length);
1443 for (FileStatus rdfs: rds) {
1444 Path rdPath = rdfs.getPath();
1445 regionDirs.add(rdPath);
1446 }
1447 return regionDirs;
1448 }
1449
1450
1451
1452
1453
1454 public static class FamilyDirFilter implements PathFilter {
1455 final FileSystem fs;
1456
1457 public FamilyDirFilter(FileSystem fs) {
1458 this.fs = fs;
1459 }
1460
1461 @Override
1462 public boolean accept(Path rd) {
1463 try {
1464
1465 HColumnDescriptor.isLegalFamilyName(Bytes.toBytes(rd.getName()));
1466 } catch (IllegalArgumentException iae) {
1467
1468 return false;
1469 }
1470
1471 try {
1472 return fs.getFileStatus(rd).isDirectory();
1473 } catch (IOException ioe) {
1474
1475 LOG.warn("Skipping file " + rd +" due to IOException", ioe);
1476 return false;
1477 }
1478 }
1479 }
1480
1481
1482
1483
1484
1485
1486
1487
1488
1489 public static List<Path> getFamilyDirs(final FileSystem fs, final Path regionDir) throws IOException {
1490
1491 FileStatus[] fds = fs.listStatus(regionDir, new FamilyDirFilter(fs));
1492 List<Path> familyDirs = new ArrayList<Path>(fds.length);
1493 for (FileStatus fdfs: fds) {
1494 Path fdPath = fdfs.getPath();
1495 familyDirs.add(fdPath);
1496 }
1497 return familyDirs;
1498 }
1499
1500 public static List<Path> getReferenceFilePaths(final FileSystem fs, final Path familyDir) throws IOException {
1501 FileStatus[] fds = fs.listStatus(familyDir, new ReferenceFileFilter(fs));
1502 List<Path> referenceFiles = new ArrayList<Path>(fds.length);
1503 for (FileStatus fdfs: fds) {
1504 Path fdPath = fdfs.getPath();
1505 referenceFiles.add(fdPath);
1506 }
1507 return referenceFiles;
1508 }
1509
1510
1511
1512
1513 public static class HFileFilter implements PathFilter {
1514 final FileSystem fs;
1515
1516 public HFileFilter(FileSystem fs) {
1517 this.fs = fs;
1518 }
1519
1520 @Override
1521 public boolean accept(Path rd) {
1522 try {
1523
1524 return !fs.getFileStatus(rd).isDirectory() && StoreFileInfo.isHFile(rd);
1525 } catch (IOException ioe) {
1526
1527 LOG.warn("Skipping file " + rd +" due to IOException", ioe);
1528 return false;
1529 }
1530 }
1531 }
1532
1533 public static class ReferenceFileFilter implements PathFilter {
1534
1535 private final FileSystem fs;
1536
1537 public ReferenceFileFilter(FileSystem fs) {
1538 this.fs = fs;
1539 }
1540
1541 @Override
1542 public boolean accept(Path rd) {
1543 try {
1544
1545 return !fs.getFileStatus(rd).isDirectory() && StoreFileInfo.isReference(rd);
1546 } catch (IOException ioe) {
1547
1548 LOG.warn("Skipping file " + rd +" due to IOException", ioe);
1549 return false;
1550 }
1551 }
1552 }
1553
1554
1555
1556
1557
1558
1559
/**
 * @param conf configuration from which the HBase root dir is derived
 * @return the {@link FileSystem} instance backing the configured root dir
 * @throws IOException from underlying filesystem access
 */
public static FileSystem getCurrentFileSystem(Configuration conf)
    throws IOException {
  return getRootDir(conf).getFileSystem(conf);
}
1564
1565
1566
1567
1568
1569
1570
1571
1572
1573
1574
1575
1576
1577
1578
1579
1580
/**
 * Convenience overload of
 * {@link #getTableStoreFilePathMap(Map, FileSystem, Path, TableName, ErrorReporter)}
 * that reports no progress (null ErrorReporter).
 *
 * @param map map to add values to; may be null, in which case a new map is created
 * @param fs filesystem to scan
 * @param hbaseRootDir HBase root directory
 * @param tableName table to scan
 * @return map of store-file name to full path
 * @throws IOException on filesystem errors
 */
public static Map<String, Path> getTableStoreFilePathMap(Map<String, Path> map,
    final FileSystem fs, final Path hbaseRootDir, TableName tableName)
    throws IOException {
  return getTableStoreFilePathMap(map, fs, hbaseRootDir, tableName, null);
}
1586
1587
1588
1589
1590
1591
1592
1593
1594
1595
1596
1597
1598
1599
1600
1601
1602
1603 public static Map<String, Path> getTableStoreFilePathMap(Map<String, Path> map,
1604 final FileSystem fs, final Path hbaseRootDir, TableName tableName, ErrorReporter errors)
1605 throws IOException {
1606 if (map == null) {
1607 map = new HashMap<String, Path>();
1608 }
1609
1610
1611 Path tableDir = FSUtils.getTableDir(hbaseRootDir, tableName);
1612
1613
1614 PathFilter familyFilter = new FamilyDirFilter(fs);
1615 FileStatus[] regionDirs = fs.listStatus(tableDir, new RegionDirFilter(fs));
1616 for (FileStatus regionDir : regionDirs) {
1617 if (null != errors) {
1618 errors.progress();
1619 }
1620 Path dd = regionDir.getPath();
1621
1622 FileStatus[] familyDirs = fs.listStatus(dd, familyFilter);
1623 for (FileStatus familyDir : familyDirs) {
1624 if (null != errors) {
1625 errors.progress();
1626 }
1627 Path family = familyDir.getPath();
1628 if (family.getName().equals(HConstants.RECOVERED_EDITS_DIR)) {
1629 continue;
1630 }
1631
1632
1633 FileStatus[] familyStatus = fs.listStatus(family);
1634 for (FileStatus sfStatus : familyStatus) {
1635 if (null != errors) {
1636 errors.progress();
1637 }
1638 Path sf = sfStatus.getPath();
1639 map.put( sf.getName(), sf);
1640 }
1641 }
1642 }
1643 return map;
1644 }
1645
1646 public static int getRegionReferenceFileCount(final FileSystem fs, final Path p) {
1647 int result = 0;
1648 try {
1649 for (Path familyDir:getFamilyDirs(fs, p)){
1650 result += getReferenceFilePaths(fs, familyDir).size();
1651 }
1652 } catch (IOException e) {
1653 LOG.warn("Error Counting reference files.", e);
1654 }
1655 return result;
1656 }
1657
1658
1659
1660
1661
1662
1663
1664
1665
1666
1667
1668
1669
1670
/**
 * Convenience overload of
 * {@link #getTableStoreFilePathMap(FileSystem, Path, ErrorReporter)} that
 * reports no progress (null ErrorReporter).
 *
 * @param fs filesystem to scan
 * @param hbaseRootDir HBase root directory
 * @return map of store-file name to full path for all tables
 * @throws IOException on filesystem errors
 */
public static Map<String, Path> getTableStoreFilePathMap(
    final FileSystem fs, final Path hbaseRootDir)
    throws IOException {
  return getTableStoreFilePathMap(fs, hbaseRootDir, null);
}
1676
1677
1678
1679
1680
1681
1682
1683
1684
1685
1686
1687
1688
1689
1690
1691 public static Map<String, Path> getTableStoreFilePathMap(
1692 final FileSystem fs, final Path hbaseRootDir, ErrorReporter errors)
1693 throws IOException {
1694 Map<String, Path> map = new HashMap<String, Path>();
1695
1696
1697
1698
1699
1700 for (Path tableDir : FSUtils.getTableDirs(fs, hbaseRootDir)) {
1701 getTableStoreFilePathMap(map, fs, hbaseRootDir,
1702 FSUtils.getTableName(tableDir), errors);
1703 }
1704 return map;
1705 }
1706
1707
1708
1709
1710
1711
1712
1713
1714
1715
1716
1717
1718 public static FileStatus [] listStatus(final FileSystem fs,
1719 final Path dir, final PathFilter filter) throws IOException {
1720 FileStatus [] status = null;
1721 try {
1722 status = filter == null ? fs.listStatus(dir) : fs.listStatus(dir, filter);
1723 } catch (FileNotFoundException fnfe) {
1724
1725 if (LOG.isTraceEnabled()) {
1726 LOG.trace(dir + " doesn't exist");
1727 }
1728 }
1729 if (status == null || status.length < 1) return null;
1730 return status;
1731 }
1732
1733
1734
1735
1736
1737
1738
1739
1740
/**
 * Unfiltered variant of {@link #listStatus(FileSystem, Path, PathFilter)}.
 *
 * @param fs filesystem to list from
 * @param dir directory to list
 * @return the listed statuses, or null if the dir is missing or empty
 * @throws IOException on filesystem errors
 */
public static FileStatus[] listStatus(final FileSystem fs, final Path dir) throws IOException {
  return listStatus(fs, dir, null);
}
1744
1745
1746
1747
1748
1749
1750
1751
1752
1753
/**
 * Thin wrapper around {@link FileSystem#delete(Path, boolean)}.
 *
 * @param fs filesystem to delete from
 * @param path path to delete
 * @param recursive whether to delete directory contents recursively
 * @return value returned by the underlying delete call
 * @throws IOException on filesystem errors
 */
public static boolean delete(final FileSystem fs, final Path path, final boolean recursive)
    throws IOException {
  return fs.delete(path, recursive);
}
1758
1759
1760
1761
1762
1763
1764
1765
1766
/**
 * Thin wrapper around {@link FileSystem#exists(Path)}.
 *
 * @param fs filesystem to check
 * @param path path to test for existence
 * @return true if the path exists
 * @throws IOException on filesystem errors
 */
public static boolean isExists(final FileSystem fs, final Path path) throws IOException {
  return fs.exists(path);
}
1770
1771
1772
1773
1774
1775
1776
1777
1778
1779
1780
1781 public static void checkAccess(UserGroupInformation ugi, FileStatus file,
1782 FsAction action) throws AccessDeniedException {
1783 if (ugi.getShortUserName().equals(file.getOwner())) {
1784 if (file.getPermission().getUserAction().implies(action)) {
1785 return;
1786 }
1787 } else if (contains(ugi.getGroupNames(), file.getGroup())) {
1788 if (file.getPermission().getGroupAction().implies(action)) {
1789 return;
1790 }
1791 } else if (file.getPermission().getOtherAction().implies(action)) {
1792 return;
1793 }
1794 throw new AccessDeniedException("Permission denied:" + " action=" + action
1795 + " path=" + file.getPath() + " user=" + ugi.getShortUserName());
1796 }
1797
1798 private static boolean contains(String[] groups, String user) {
1799 for (String group : groups) {
1800 if (group.equals(user)) {
1801 return true;
1802 }
1803 }
1804 return false;
1805 }
1806
1807
1808
1809
1810
1811
1812
1813
/**
 * Logs (at debug level) a tree view of the filesystem rooted at {@code root}.
 *
 * @param fs filesystem to walk
 * @param root directory at which the dump starts
 * @param LOG logger to write the tree to
 * @throws IOException on filesystem errors while listing
 */
public static void logFileSystemState(final FileSystem fs, final Path root, Log LOG)
    throws IOException {
  LOG.debug("Current file system:");
  logFSTree(LOG, fs, root, "|-");
}
1819
1820
1821
1822
1823
1824
1825 private static void logFSTree(Log LOG, final FileSystem fs, final Path root, String prefix)
1826 throws IOException {
1827 FileStatus[] files = FSUtils.listStatus(fs, root, null);
1828 if (files == null) return;
1829
1830 for (FileStatus file : files) {
1831 if (file.isDirectory()) {
1832 LOG.debug(prefix + file.getPath().getName() + "/");
1833 logFSTree(LOG, fs, file.getPath(), prefix + "---");
1834 } else {
1835 LOG.debug(prefix + file.getPath().getName());
1836 }
1837 }
1838 }
1839
/**
 * Renames {@code src} to {@code dest} after first bumping src's modification
 * time to "now" (access time left untouched via -1).
 *
 * @param fs filesystem to operate on
 * @param src source path; its mtime is updated before the rename
 * @param dest destination path
 * @return value returned by {@link FileSystem#rename(Path, Path)}
 * @throws IOException on filesystem errors
 */
public static boolean renameAndSetModifyTime(final FileSystem fs, final Path src, final Path dest)
    throws IOException {
  // Refresh mtime first so downstream consumers of dest see a current timestamp.
  fs.setTimes(src, EnvironmentEdgeManager.currentTime(), -1);
  return fs.rename(src, dest);
}
1846
1847
1848
1849
1850
1851
1852
1853
1854
1855
1856
1857
1858
1859
/**
 * Computes HDFS block locality for every region of every table, using a
 * thread pool sized from configuration.
 *
 * @param conf configuration supplying the root dir and thread-pool size
 * @return map of region encoded name to (host -> locality fraction)
 * @throws IOException on filesystem errors during the scan
 */
public static Map<String, Map<String, Float>> getRegionDegreeLocalityMappingFromFS(
    final Configuration conf) throws IOException {
  // null desiredTable => scan all tables
  return getRegionDegreeLocalityMappingFromFS(
      conf, null,
      conf.getInt(THREAD_POOLSIZE, DEFAULT_THREAD_POOLSIZE));

}
1867
1868
1869
1870
1871
1872
1873
1874
1875
1876
1877
1878
1879
1880
1881
1882
1883
/**
 * Computes HDFS block locality per region, optionally restricted to one table.
 *
 * @param conf configuration supplying the root dir
 * @param desiredTable table to restrict the scan to, or null for all tables
 * @param threadPoolSize number of threads used for the filesystem walk
 * @return map of region encoded name to (host -> locality fraction);
 *         a ConcurrentHashMap so worker threads may fill it in parallel
 * @throws IOException on filesystem errors during the scan
 */
public static Map<String, Map<String, Float>> getRegionDegreeLocalityMappingFromFS(
    final Configuration conf, final String desiredTable, int threadPoolSize)
    throws IOException {
  Map<String, Map<String, Float>> regionDegreeLocalityMapping =
      new ConcurrentHashMap<String, Map<String, Float>>();
  getRegionLocalityMappingFromFS(conf, desiredTable, threadPoolSize, null,
      regionDegreeLocalityMapping);
  return regionDegreeLocalityMapping;
}
1893
1894
1895
1896
1897
1898
1899
1900
1901
1902
1903
1904
1905
1906
1907
1908
1909
1910
1911
1912
1913
1914 private static void getRegionLocalityMappingFromFS(
1915 final Configuration conf, final String desiredTable,
1916 int threadPoolSize,
1917 Map<String, String> regionToBestLocalityRSMapping,
1918 Map<String, Map<String, Float>> regionDegreeLocalityMapping)
1919 throws IOException {
1920 FileSystem fs = FileSystem.get(conf);
1921 Path rootPath = FSUtils.getRootDir(conf);
1922 long startTime = EnvironmentEdgeManager.currentTime();
1923 Path queryPath;
1924
1925
1926
1927
1928
1929
1930
1931
1932
1933
1934
1935
1936
1937
1938
1939
1940
1941
1942
1943
1944
1945
1946
1947
1948
1949
1950
1951
1952
1953
1954
1955
1956
1957
1958
1959
1960
1961
1962
1963
1964
1965
1966
1967
1968
1969
1970
1971
1972
1973
1974
1975
1976
1977
1978
1979
1980
1981
1982
1983
1984
1985
1986
1987
1988
1989
1990
1991
1992
1993
1994
1995
1996
1997
1998
1999
2000
2001
2002
2003
2004
2005
2006
2007
2008
2009
2010
2011
2012
2013
2014
2015
2016
2017
2018
2019
2020
2021
2022
2023
/**
 * Sanity-checks the short-circuit-read configuration: warns if checksum
 * skipping is enabled (HBase does its own checksumming — see HBASE-6868) and
 * then validates the short-circuit read buffer size.
 *
 * @param conf configuration to inspect and (via the buffer-size check) adjust
 */
public static void setupShortCircuitRead(final Configuration conf) {
  // Check that the user has not set the "dfs.client.read.shortcircuit.skip.checksum" property.
  boolean shortCircuitSkipChecksum =
    conf.getBoolean("dfs.client.read.shortcircuit.skip.checksum", false);
  boolean useHBaseChecksum = conf.getBoolean(HConstants.HBASE_CHECKSUM_VERIFICATION, true);
  if (shortCircuitSkipChecksum) {
    LOG.warn("Configuration \"dfs.client.read.shortcircuit.skip.checksum\" should not " +
      "be set to true." + (useHBaseChecksum ? " HBase checksum doesn't require " +
      "it, see https://issues.apache.org/jira/browse/HBASE-6868." : ""));
    // NOTE(review): with assertions enabled (-ea) this assert always fires when
    // the property is set — presumably intended to fail fast in test runs; confirm.
    assert !shortCircuitSkipChecksum;
  }
  checkShortCircuitReadBufferSize(conf);
}
2037
2038
2039
2040
2041
2042 public static void checkShortCircuitReadBufferSize(final Configuration conf) {
2043 final int defaultSize = HConstants.DEFAULT_BLOCKSIZE * 2;
2044 final int notSet = -1;
2045
2046 final String dfsKey = "dfs.client.read.shortcircuit.buffer.size";
2047 int size = conf.getInt(dfsKey, notSet);
2048
2049 if (size != notSet) return;
2050
2051 int hbaseSize = conf.getInt("hbase." + dfsKey, defaultSize);
2052 conf.setIfUnset(dfsKey, Integer.toString(hbaseSize));
2053 }
2054 }