@InterfaceAudience.Private public abstract class FSUtils extends Object
Modifier and Type | Class and Description |
---|---|
static class |
FSUtils.BlackListDirFilter
Directory filter that doesn't include any of the directories in the specified blacklist
|
static class |
FSUtils.DirFilter
A
PathFilter that only allows directories. |
static class |
FSUtils.FamilyDirFilter
Filter for all dirs that are legal column family names.
|
(package private) static class |
FSUtils.FileFilter
A
PathFilter that returns only regular files. |
static class |
FSUtils.HFileFilter
Filter for HFiles that excludes reference files.
|
static class |
FSUtils.ReferenceFileFilter |
static class |
FSUtils.RegionDirFilter
Filter for all dirs that don't start with '.'
|
static class |
FSUtils.UserTableDirFilter
A
PathFilter that returns usertable directories. |
Modifier and Type | Field and Description |
---|---|
private static int |
DEFAULT_THREAD_POOLSIZE |
static String |
FULL_RWX_PERMISSIONS
Full access permissions (starting point for a umask)
|
private static org.apache.commons.logging.Log |
LOG |
private static String |
THREAD_POOLSIZE |
static boolean |
WINDOWS
Set to true on Windows platforms
|
Modifier | Constructor and Description |
---|---|
protected |
FSUtils() |
Modifier and Type | Method and Description |
---|---|
static void |
checkAccess(org.apache.hadoop.security.UserGroupInformation ugi,
org.apache.hadoop.fs.FileStatus file,
org.apache.hadoop.fs.permission.FsAction action)
Throw an exception if an action is not permitted by a user on a file.
|
static boolean |
checkClusterIdExists(org.apache.hadoop.fs.FileSystem fs,
org.apache.hadoop.fs.Path rootdir,
int wait)
Checks that a cluster ID file exists in the HBase root directory
|
static void |
checkDfsSafeMode(org.apache.hadoop.conf.Configuration conf)
Check whether dfs is in safemode.
|
static void |
checkFileSystemAvailable(org.apache.hadoop.fs.FileSystem fs)
Checks to see if the specified file system is available
|
static void |
checkShortCircuitReadBufferSize(org.apache.hadoop.conf.Configuration conf)
Check if short circuit read buffer size is set and if not, set it to hbase value.
|
static void |
checkVersion(org.apache.hadoop.fs.FileSystem fs,
org.apache.hadoop.fs.Path rootdir,
boolean message)
Verifies current version of file system
|
static void |
checkVersion(org.apache.hadoop.fs.FileSystem fs,
org.apache.hadoop.fs.Path rootdir,
boolean message,
int wait,
int retries)
Verifies current version of file system
|
static HDFSBlocksDistribution |
computeHDFSBlocksDistribution(org.apache.hadoop.fs.FileSystem fs,
org.apache.hadoop.fs.FileStatus status,
long start,
long length)
Compute HDFS blocks distribution of a given file, or a portion of the file
|
private static boolean |
contains(String[] groups,
String user) |
static org.apache.hadoop.fs.FSDataOutputStream |
create(org.apache.hadoop.conf.Configuration conf,
org.apache.hadoop.fs.FileSystem fs,
org.apache.hadoop.fs.Path path,
org.apache.hadoop.fs.permission.FsPermission perm,
InetSocketAddress[] favoredNodes)
Create the specified file on the filesystem.
|
static org.apache.hadoop.fs.FSDataOutputStream |
create(org.apache.hadoop.fs.FileSystem fs,
org.apache.hadoop.fs.Path path,
org.apache.hadoop.fs.permission.FsPermission perm,
boolean overwrite)
Create the specified file on the filesystem.
|
static boolean |
delete(org.apache.hadoop.fs.FileSystem fs,
org.apache.hadoop.fs.Path path,
boolean recursive)
Calls fs.delete() and returns the value returned by the fs.delete()
|
static boolean |
deleteDirectory(org.apache.hadoop.fs.FileSystem fs,
org.apache.hadoop.fs.Path dir)
Delete if exists.
|
static boolean |
deleteRegionDir(org.apache.hadoop.conf.Configuration conf,
HRegionInfo hri)
Delete the region directory if exists.
|
static ClusterId |
getClusterId(org.apache.hadoop.fs.FileSystem fs,
org.apache.hadoop.fs.Path rootdir)
Returns the value of the unique cluster ID stored for this HBase instance.
|
static org.apache.hadoop.fs.FileSystem |
getCurrentFileSystem(org.apache.hadoop.conf.Configuration conf) |
static long |
getDefaultBlockSize(org.apache.hadoop.fs.FileSystem fs,
org.apache.hadoop.fs.Path path)
Return the number of bytes that large input files should optimally
be split into to minimize i/o time.
|
static int |
getDefaultBufferSize(org.apache.hadoop.fs.FileSystem fs)
Returns the default buffer size to use during writes.
|
static short |
getDefaultReplication(org.apache.hadoop.fs.FileSystem fs,
org.apache.hadoop.fs.Path path) |
static List<org.apache.hadoop.fs.Path> |
getFamilyDirs(org.apache.hadoop.fs.FileSystem fs,
org.apache.hadoop.fs.Path regionDir)
Given a particular region dir, return all the familydirs inside it
|
static org.apache.hadoop.fs.permission.FsPermission |
getFilePermissions(org.apache.hadoop.fs.FileSystem fs,
org.apache.hadoop.conf.Configuration conf,
String permssionConfKey)
Get the file permissions specified in the configuration, if they are
enabled.
|
static FSUtils |
getInstance(org.apache.hadoop.fs.FileSystem fs,
org.apache.hadoop.conf.Configuration conf) |
static List<org.apache.hadoop.fs.Path> |
getLocalTableDirs(org.apache.hadoop.fs.FileSystem fs,
org.apache.hadoop.fs.Path rootdir) |
static org.apache.hadoop.fs.Path |
getNamespaceDir(org.apache.hadoop.fs.Path rootdir,
String namespace)
Returns the
Path object representing
the namespace directory under path rootdir |
static String |
getPath(org.apache.hadoop.fs.Path p)
Return the 'path' component of a Path.
|
static List<org.apache.hadoop.fs.Path> |
getReferenceFilePaths(org.apache.hadoop.fs.FileSystem fs,
org.apache.hadoop.fs.Path familyDir) |
static Map<String,Map<String,Float>> |
getRegionDegreeLocalityMappingFromFS(org.apache.hadoop.conf.Configuration conf)
This function is to scan the root path of the file system to get the
degree of locality for each region on each of the servers having at least
one block of that region.
|
static Map<String,Map<String,Float>> |
getRegionDegreeLocalityMappingFromFS(org.apache.hadoop.conf.Configuration conf,
String desiredTable,
int threadPoolSize)
This function is to scan the root path of the file system to get the
degree of locality for each region on each of the servers having at least
one block of that region.
|
static List<org.apache.hadoop.fs.Path> |
getRegionDirs(org.apache.hadoop.fs.FileSystem fs,
org.apache.hadoop.fs.Path tableDir)
Given a particular table dir, return all the regiondirs inside it, excluding files such as
.tableinfo
|
private static void |
getRegionLocalityMappingFromFS(org.apache.hadoop.conf.Configuration conf,
String desiredTable,
int threadPoolSize,
Map<String,String> regionToBestLocalityRSMapping,
Map<String,Map<String,Float>> regionDegreeLocalityMapping)
This function is to scan the root path of the file system to get either the
mapping between the region name and its best locality region server or the
degree of locality of each region on each of the servers having at least
one block of that region.
|
static int |
getRegionReferenceFileCount(org.apache.hadoop.fs.FileSystem fs,
org.apache.hadoop.fs.Path p) |
static org.apache.hadoop.fs.Path |
getRootDir(org.apache.hadoop.conf.Configuration c) |
static org.apache.hadoop.fs.Path |
getTableDir(org.apache.hadoop.fs.Path rootdir,
TableName tableName)
Returns the
Path object representing the table directory under
path rootdir |
static List<org.apache.hadoop.fs.Path> |
getTableDirs(org.apache.hadoop.fs.FileSystem fs,
org.apache.hadoop.fs.Path rootdir) |
static Map<String,Integer> |
getTableFragmentation(org.apache.hadoop.fs.FileSystem fs,
org.apache.hadoop.fs.Path hbaseRootDir)
Runs through the HBase rootdir and checks how many stores for each table
have more than one file in them.
|
static Map<String,Integer> |
getTableFragmentation(HMaster master)
Runs through the HBase rootdir and checks how many stores for each table
have more than one file in them.
|
static TableName |
getTableName(org.apache.hadoop.fs.Path tablePath)
Returns the
TableName object representing
the table directory under
path rootdir |
static Map<String,org.apache.hadoop.fs.Path> |
getTableStoreFilePathMap(org.apache.hadoop.fs.FileSystem fs,
org.apache.hadoop.fs.Path hbaseRootDir)
Runs through the HBase rootdir and creates a reverse lookup map for
table StoreFile names to the full Path.
|
static Map<String,org.apache.hadoop.fs.Path> |
getTableStoreFilePathMap(org.apache.hadoop.fs.FileSystem fs,
org.apache.hadoop.fs.Path hbaseRootDir,
HBaseFsck.ErrorReporter errors)
Runs through the HBase rootdir and creates a reverse lookup map for
table StoreFile names to the full Path.
|
static Map<String,org.apache.hadoop.fs.Path> |
getTableStoreFilePathMap(Map<String,org.apache.hadoop.fs.Path> map,
org.apache.hadoop.fs.FileSystem fs,
org.apache.hadoop.fs.Path hbaseRootDir,
TableName tableName)
Runs through the HBase rootdir/tablename and creates a reverse lookup map for
table StoreFile names to the full Path.
|
static Map<String,org.apache.hadoop.fs.Path> |
getTableStoreFilePathMap(Map<String,org.apache.hadoop.fs.Path> map,
org.apache.hadoop.fs.FileSystem fs,
org.apache.hadoop.fs.Path hbaseRootDir,
TableName tableName,
HBaseFsck.ErrorReporter errors)
Runs through the HBase rootdir/tablename and creates a reverse lookup map for
table StoreFile names to the full Path.
|
static int |
getTotalTableFragmentation(HMaster master)
Returns the total overall fragmentation percentage.
|
static String |
getVersion(org.apache.hadoop.fs.FileSystem fs,
org.apache.hadoop.fs.Path rootdir)
Verifies current version of file system
|
static boolean |
isAppendSupported(org.apache.hadoop.conf.Configuration conf)
Heuristic to determine whether is safe or not to open a file for append
Looks both for dfs.support.append and use reflection to search
for SequenceFile.Writer.syncFs() or FSDataOutputStream.hflush()
|
static boolean |
isExists(org.apache.hadoop.fs.FileSystem fs,
org.apache.hadoop.fs.Path path)
Calls fs.exists().
|
static boolean |
isHDFS(org.apache.hadoop.conf.Configuration conf) |
private static boolean |
isInSafeMode(org.apache.hadoop.hdfs.DistributedFileSystem dfs)
We use reflection because
DistributedFileSystem.setSafeMode(
HdfsConstants.SafeModeAction action, boolean isChecked) is not in hadoop 1.1 |
static boolean |
isMajorCompacted(org.apache.hadoop.fs.FileSystem fs,
org.apache.hadoop.fs.Path hbaseRootDir)
Runs through the hbase rootdir and checks all stores have only
one file in them -- that is, they've been major compacted.
|
static boolean |
isMatchingTail(org.apache.hadoop.fs.Path pathToSearch,
org.apache.hadoop.fs.Path pathTail)
Compare path component of the Path URI; e.g.
|
static boolean |
isMatchingTail(org.apache.hadoop.fs.Path pathToSearch,
String pathTail)
Compare path component of the Path URI; e.g.
|
static boolean |
isRecoveredEdits(org.apache.hadoop.fs.Path path)
Checks if the given path is the one with 'recovered.edits' dir.
|
static boolean |
isStartingWithPath(org.apache.hadoop.fs.Path rootPath,
String path)
Compare of path component.
|
static org.apache.hadoop.fs.FileStatus[] |
listStatus(org.apache.hadoop.fs.FileSystem fs,
org.apache.hadoop.fs.Path dir)
Calls fs.listStatus() and treats FileNotFoundException as non-fatal
This accommodates differences between hadoop versions
|
static org.apache.hadoop.fs.FileStatus[] |
listStatus(org.apache.hadoop.fs.FileSystem fs,
org.apache.hadoop.fs.Path dir,
org.apache.hadoop.fs.PathFilter filter)
Calls fs.listStatus() and treats FileNotFoundException as non-fatal
This accommodates differences between hadoop versions, where hadoop 1
does not throw a FileNotFoundException and returns an empty FileStatus[],
while Hadoop 2 will throw FileNotFoundException.
|
static void |
logFileSystemState(org.apache.hadoop.fs.FileSystem fs,
org.apache.hadoop.fs.Path root,
org.apache.commons.logging.Log LOG)
Log the current state of the filesystem from a certain root directory
|
private static void |
logFSTree(org.apache.commons.logging.Log LOG,
org.apache.hadoop.fs.FileSystem fs,
org.apache.hadoop.fs.Path root,
String prefix)
Recursive helper to log the state of the FS
|
static boolean |
metaRegionExists(org.apache.hadoop.fs.FileSystem fs,
org.apache.hadoop.fs.Path rootdir)
Checks if meta region exists
|
(package private) static String |
parseVersionFrom(byte[] bytes)
Parse the content of the ${HBASE_ROOTDIR}/hbase.version file.
|
abstract void |
recoverFileLease(org.apache.hadoop.fs.FileSystem fs,
org.apache.hadoop.fs.Path p,
org.apache.hadoop.conf.Configuration conf,
CancelableProgressable reporter)
Recover file lease.
|
static String |
removeRootPath(org.apache.hadoop.fs.Path path,
org.apache.hadoop.conf.Configuration conf)
Checks for the presence of the root path (using the provided conf object) in the given path.
|
static boolean |
renameAndSetModifyTime(org.apache.hadoop.fs.FileSystem fs,
org.apache.hadoop.fs.Path src,
org.apache.hadoop.fs.Path dest) |
private static void |
rewriteAsPb(org.apache.hadoop.fs.FileSystem fs,
org.apache.hadoop.fs.Path rootdir,
org.apache.hadoop.fs.Path p,
ClusterId cid) |
static void |
setClusterId(org.apache.hadoop.fs.FileSystem fs,
org.apache.hadoop.fs.Path rootdir,
ClusterId clusterId,
int wait)
Writes a new unique identifier for this cluster to the "hbase.id" file
in the HBase root directory
|
static void |
setFsDefault(org.apache.hadoop.conf.Configuration c,
org.apache.hadoop.fs.Path root) |
static void |
setRootDir(org.apache.hadoop.conf.Configuration c,
org.apache.hadoop.fs.Path root) |
static void |
setStoragePolicy(org.apache.hadoop.fs.FileSystem fs,
org.apache.hadoop.conf.Configuration conf,
org.apache.hadoop.fs.Path path,
String policyKey,
String defaultPolicy)
Sets storage policy for given path according to config setting.
|
static void |
setupShortCircuitRead(org.apache.hadoop.conf.Configuration conf)
Do our short circuit read setup.
|
static void |
setVersion(org.apache.hadoop.fs.FileSystem fs,
org.apache.hadoop.fs.Path rootdir)
Sets version of file system
|
static void |
setVersion(org.apache.hadoop.fs.FileSystem fs,
org.apache.hadoop.fs.Path rootdir,
int wait,
int retries)
Sets version of file system
|
static void |
setVersion(org.apache.hadoop.fs.FileSystem fs,
org.apache.hadoop.fs.Path rootdir,
String version,
int wait,
int retries)
Sets version of file system
|
(package private) static byte[] |
toVersionByteArray(String version)
Create the content to write into the ${HBASE_ROOTDIR}/hbase.version file.
|
static org.apache.hadoop.fs.Path |
validateRootPath(org.apache.hadoop.fs.Path root)
Verifies root directory path is a valid URI with a scheme
|
static void |
waitOnSafeMode(org.apache.hadoop.conf.Configuration conf,
long wait)
If DFS, check safe mode and if so, wait until we clear it.
|
private static final org.apache.commons.logging.Log LOG
public static final String FULL_RWX_PERMISSIONS
private static final String THREAD_POOLSIZE
private static final int DEFAULT_THREAD_POOLSIZE
public static final boolean WINDOWS
public static void setStoragePolicy(org.apache.hadoop.fs.FileSystem fs, org.apache.hadoop.conf.Configuration conf, org.apache.hadoop.fs.Path path, String policyKey, String defaultPolicy)
fs - We only do anything if an instance of DistributedFileSystem
conf - used to look up storage policy with given key; not modified.
path - the Path whose storage policy is to be set
policyKey - e.g. HConstants.WAL_STORAGE_POLICY
defaultPolicy - usually should be the policy NONE to delegate to HDFS
public static boolean isStartingWithPath(org.apache.hadoop.fs.Path rootPath, String path)
If path starts with rootPath,
then the function returns true.
rootPath -
path -
Returns true if path starts with rootPath
public static boolean isMatchingTail(org.apache.hadoop.fs.Path pathToSearch, String pathTail)
pathToSearch - Path we will be trying to match.
pathTail -
Returns true if pathTail is tail on the path of pathToSearch
public static boolean isMatchingTail(org.apache.hadoop.fs.Path pathToSearch, org.apache.hadoop.fs.Path pathTail)
pathToSearch - Path we will be trying to match.
pathTail -
Returns true if pathTail is tail on the path of pathToSearch
public static FSUtils getInstance(org.apache.hadoop.fs.FileSystem fs, org.apache.hadoop.conf.Configuration conf)
public static boolean deleteDirectory(org.apache.hadoop.fs.FileSystem fs, org.apache.hadoop.fs.Path dir) throws IOException
fs - filesystem object
dir - directory to delete
Returns true if deleted dir
Throws IOException - e
public static boolean deleteRegionDir(org.apache.hadoop.conf.Configuration conf, HRegionInfo hri) throws IOException
conf
- hri
- IOException
public static long getDefaultBlockSize(org.apache.hadoop.fs.FileSystem fs, org.apache.hadoop.fs.Path path) throws IOException
fs - filesystem object
Throws IOException - e
public static short getDefaultReplication(org.apache.hadoop.fs.FileSystem fs, org.apache.hadoop.fs.Path path) throws IOException
IOException
public static int getDefaultBufferSize(org.apache.hadoop.fs.FileSystem fs)
fs
- filesystem objectpublic static org.apache.hadoop.fs.FSDataOutputStream create(org.apache.hadoop.conf.Configuration conf, org.apache.hadoop.fs.FileSystem fs, org.apache.hadoop.fs.Path path, org.apache.hadoop.fs.permission.FsPermission perm, InetSocketAddress[] favoredNodes) throws IOException
HColumnDescriptor.DEFAULT_DFS_REPLICATION
conf
- configurationsfs
- FileSystem
on which to write the filepath
- Path
to the file to writeperm
- permissionsfavoredNodes
- IOException
- if the file cannot be createdpublic static org.apache.hadoop.fs.FSDataOutputStream create(org.apache.hadoop.fs.FileSystem fs, org.apache.hadoop.fs.Path path, org.apache.hadoop.fs.permission.FsPermission perm, boolean overwrite) throws IOException
fs
- FileSystem
on which to write the filepath
- Path
to the file to writeperm
- overwrite
- Whether or not the created file should be overwritten.IOException
- if the file cannot be createdpublic static org.apache.hadoop.fs.permission.FsPermission getFilePermissions(org.apache.hadoop.fs.FileSystem fs, org.apache.hadoop.conf.Configuration conf, String permssionConfKey)
fs
- filesystem that the file will be created on.conf
- configuration to read for determining if permissions are
enabled and which to usepermssionConfKey
- property key in the configuration to use when
finding the permissionpublic static void checkFileSystemAvailable(org.apache.hadoop.fs.FileSystem fs) throws IOException
fs
- filesystemIOException
- eprivate static boolean isInSafeMode(org.apache.hadoop.hdfs.DistributedFileSystem dfs) throws IOException
DistributedFileSystem.setSafeMode(
HdfsConstants.SafeModeAction action, boolean isChecked)
is not in hadoop 1.1dfs
- IOException
public static void checkDfsSafeMode(org.apache.hadoop.conf.Configuration conf) throws IOException
conf
- IOException
public static String getVersion(org.apache.hadoop.fs.FileSystem fs, org.apache.hadoop.fs.Path rootdir) throws IOException, DeserializationException
fs
- filesystem objectrootdir
- root hbase directoryIOException
- eDeserializationException
static String parseVersionFrom(byte[] bytes) throws DeserializationException
bytes
- The byte content of the hbase.version file.DeserializationException
static byte[] toVersionByteArray(String version)
version
- Version to persistversion
content and a bit of pb magic for a prefix.public static void checkVersion(org.apache.hadoop.fs.FileSystem fs, org.apache.hadoop.fs.Path rootdir, boolean message) throws IOException, DeserializationException
fs
- file systemrootdir
- root directory of HBase installationmessage
- if true, issues a message on System.outIOException
- eDeserializationException
public static void checkVersion(org.apache.hadoop.fs.FileSystem fs, org.apache.hadoop.fs.Path rootdir, boolean message, int wait, int retries) throws IOException, DeserializationException
fs
- file systemrootdir
- root directory of HBase installationmessage
- if true, issues a message on System.outwait
- wait intervalretries
- number of times to retryIOException
- eDeserializationException
public static void setVersion(org.apache.hadoop.fs.FileSystem fs, org.apache.hadoop.fs.Path rootdir) throws IOException
fs
- filesystem objectrootdir
- hbase rootIOException
- epublic static void setVersion(org.apache.hadoop.fs.FileSystem fs, org.apache.hadoop.fs.Path rootdir, int wait, int retries) throws IOException
fs
- filesystem objectrootdir
- hbase rootwait
- time to wait for retryretries
- number of times to retry before failingIOException
- epublic static void setVersion(org.apache.hadoop.fs.FileSystem fs, org.apache.hadoop.fs.Path rootdir, String version, int wait, int retries) throws IOException
fs
- filesystem objectrootdir
- hbase root directoryversion
- version to setwait
- time to wait for retryretries
- number of times to retry before throwing an IOExceptionIOException
- epublic static boolean checkClusterIdExists(org.apache.hadoop.fs.FileSystem fs, org.apache.hadoop.fs.Path rootdir, int wait) throws IOException
fs
- the root directory FileSystemrootdir
- the HBase root directory in HDFSwait
- how long to wait between retriestrue
if the file exists, otherwise false
IOException
- if checking the FileSystem failspublic static ClusterId getClusterId(org.apache.hadoop.fs.FileSystem fs, org.apache.hadoop.fs.Path rootdir) throws IOException
fs
- the root directory FileSystemrootdir
- the path to the HBase root directoryIOException
- if reading the cluster ID file failsprivate static void rewriteAsPb(org.apache.hadoop.fs.FileSystem fs, org.apache.hadoop.fs.Path rootdir, org.apache.hadoop.fs.Path p, ClusterId cid) throws IOException
cid
- IOException
public static void setClusterId(org.apache.hadoop.fs.FileSystem fs, org.apache.hadoop.fs.Path rootdir, ClusterId clusterId, int wait) throws IOException
fs
- the root directory FileSystemrootdir
- the path to the HBase root directoryclusterId
- the unique identifier to storewait
- how long (in milliseconds) to wait between retriesIOException
- if writing to the FileSystem fails and no wait valuepublic static org.apache.hadoop.fs.Path validateRootPath(org.apache.hadoop.fs.Path root) throws IOException
root
- root directory pathroot
argument.IOException
- if not a valid URI with a schemepublic static String removeRootPath(org.apache.hadoop.fs.Path path, org.apache.hadoop.conf.Configuration conf) throws IOException
path
- conf
- IOException
public static void waitOnSafeMode(org.apache.hadoop.conf.Configuration conf, long wait) throws IOException
conf - configuration
wait - Sleep between retries
Throws IOException - e
public static String getPath(org.apache.hadoop.fs.Path p)
If a Path is hdfs://example.org:9000/hbase_trunk/TestTable/compaction.dir,
this method returns /hbase_trunk/TestTable/compaction.dir.
This method is useful if you want to print out a Path without qualifying
Filesystem instance.
p - Filesystem Path whose 'path' component we are to return.
public static org.apache.hadoop.fs.Path getRootDir(org.apache.hadoop.conf.Configuration c) throws IOException
c
- configurationhbase.rootdir
from
configuration as a qualified Path.IOException
- epublic static void setRootDir(org.apache.hadoop.conf.Configuration c, org.apache.hadoop.fs.Path root) throws IOException
IOException
public static void setFsDefault(org.apache.hadoop.conf.Configuration c, org.apache.hadoop.fs.Path root) throws IOException
IOException
public static boolean metaRegionExists(org.apache.hadoop.fs.FileSystem fs, org.apache.hadoop.fs.Path rootdir) throws IOException
fs
- file systemrootdir
- root directory of HBase installationIOException
- epublic static HDFSBlocksDistribution computeHDFSBlocksDistribution(org.apache.hadoop.fs.FileSystem fs, org.apache.hadoop.fs.FileStatus status, long start, long length) throws IOException
fs
- file systemstatus
- file status of the filestart
- start position of the portionlength
- length of the portionIOException
public static boolean isMajorCompacted(org.apache.hadoop.fs.FileSystem fs, org.apache.hadoop.fs.Path hbaseRootDir) throws IOException
fs
- filesystemhbaseRootDir
- hbase root directoryIOException
- epublic static int getTotalTableFragmentation(HMaster master) throws IOException
master
- The master defining the HBase root and file system.IOException
- When scanning the directory fails.public static Map<String,Integer> getTableFragmentation(HMaster master) throws IOException
master
- The master defining the HBase root and file system.IOException
- When scanning the directory fails.public static Map<String,Integer> getTableFragmentation(org.apache.hadoop.fs.FileSystem fs, org.apache.hadoop.fs.Path hbaseRootDir) throws IOException
fs
- The file system to use.hbaseRootDir
- The root directory to scan.IOException
- When scanning the directory fails.public static org.apache.hadoop.fs.Path getTableDir(org.apache.hadoop.fs.Path rootdir, TableName tableName)
Path
object representing the table directory under
path rootdirrootdir
- qualified path of HBase root directorytableName
- name of tablePath
for tablepublic static TableName getTableName(org.apache.hadoop.fs.Path tablePath)
TableName
object representing
the table directory under
path rootdirtablePath
- path of tablePath
for tablepublic static org.apache.hadoop.fs.Path getNamespaceDir(org.apache.hadoop.fs.Path rootdir, String namespace)
Path
object representing
the namespace directory under path rootdirrootdir
- qualified path of HBase root directorynamespace
- namespace namePath
for tablepublic static boolean isAppendSupported(org.apache.hadoop.conf.Configuration conf)
conf
- public static boolean isHDFS(org.apache.hadoop.conf.Configuration conf) throws IOException
conf
- IOException
public abstract void recoverFileLease(org.apache.hadoop.fs.FileSystem fs, org.apache.hadoop.fs.Path p, org.apache.hadoop.conf.Configuration conf, CancelableProgressable reporter) throws IOException
fs
- FileSystem handlep
- Path of file to recover leaseconf
- Configuration handleIOException
public static List<org.apache.hadoop.fs.Path> getTableDirs(org.apache.hadoop.fs.FileSystem fs, org.apache.hadoop.fs.Path rootdir) throws IOException
IOException
public static List<org.apache.hadoop.fs.Path> getLocalTableDirs(org.apache.hadoop.fs.FileSystem fs, org.apache.hadoop.fs.Path rootdir) throws IOException
fs
- rootdir
- rootdir
. Ignore non table hbase folders such as
.logs, .oldlogs, .corrupt folders.IOException
public static boolean isRecoveredEdits(org.apache.hadoop.fs.Path path)
path
- public static List<org.apache.hadoop.fs.Path> getRegionDirs(org.apache.hadoop.fs.FileSystem fs, org.apache.hadoop.fs.Path tableDir) throws IOException
fs
- A file system for the PathtableDir
- Path to a specific table directory <hbase.rootdir>/<tabledir>IOException
public static List<org.apache.hadoop.fs.Path> getFamilyDirs(org.apache.hadoop.fs.FileSystem fs, org.apache.hadoop.fs.Path regionDir) throws IOException
fs
- A file system for the PathregionDir
- Path to a specific region directoryIOException
public static List<org.apache.hadoop.fs.Path> getReferenceFilePaths(org.apache.hadoop.fs.FileSystem fs, org.apache.hadoop.fs.Path familyDir) throws IOException
IOException
public static org.apache.hadoop.fs.FileSystem getCurrentFileSystem(org.apache.hadoop.conf.Configuration conf) throws IOException
conf
- IOException
public static Map<String,org.apache.hadoop.fs.Path> getTableStoreFilePathMap(Map<String,org.apache.hadoop.fs.Path> map, org.apache.hadoop.fs.FileSystem fs, org.apache.hadoop.fs.Path hbaseRootDir, TableName tableName) throws IOException
map
- map to add values. If null, this method will create and populate one to returnfs
- The file system to use.hbaseRootDir
- The root directory to scan.tableName
- name of the table to scan.IOException
- When scanning the directory fails.public static Map<String,org.apache.hadoop.fs.Path> getTableStoreFilePathMap(Map<String,org.apache.hadoop.fs.Path> map, org.apache.hadoop.fs.FileSystem fs, org.apache.hadoop.fs.Path hbaseRootDir, TableName tableName, HBaseFsck.ErrorReporter errors) throws IOException
map
- map to add values. If null, this method will create and populate one to returnfs
- The file system to use.hbaseRootDir
- The root directory to scan.tableName
- name of the table to scan.errors
- ErrorReporter instance or nullIOException
- When scanning the directory fails.public static int getRegionReferenceFileCount(org.apache.hadoop.fs.FileSystem fs, org.apache.hadoop.fs.Path p)
public static Map<String,org.apache.hadoop.fs.Path> getTableStoreFilePathMap(org.apache.hadoop.fs.FileSystem fs, org.apache.hadoop.fs.Path hbaseRootDir) throws IOException
fs
- The file system to use.hbaseRootDir
- The root directory to scan.IOException
- When scanning the directory fails.public static Map<String,org.apache.hadoop.fs.Path> getTableStoreFilePathMap(org.apache.hadoop.fs.FileSystem fs, org.apache.hadoop.fs.Path hbaseRootDir, HBaseFsck.ErrorReporter errors) throws IOException
fs
- The file system to use.hbaseRootDir
- The root directory to scan.errors
- ErrorReporter instance or nullIOException
- When scanning the directory fails.public static org.apache.hadoop.fs.FileStatus[] listStatus(org.apache.hadoop.fs.FileSystem fs, org.apache.hadoop.fs.Path dir, org.apache.hadoop.fs.PathFilter filter) throws IOException
fs
- file systemdir
- directoryfilter
- path filterIOException
public static org.apache.hadoop.fs.FileStatus[] listStatus(org.apache.hadoop.fs.FileSystem fs, org.apache.hadoop.fs.Path dir) throws IOException
fs
- file systemdir
- directoryIOException
public static boolean delete(org.apache.hadoop.fs.FileSystem fs, org.apache.hadoop.fs.Path path, boolean recursive) throws IOException
fs
- path
- recursive
- IOException
public static boolean isExists(org.apache.hadoop.fs.FileSystem fs, org.apache.hadoop.fs.Path path) throws IOException
fs
- path
- IOException
public static void checkAccess(org.apache.hadoop.security.UserGroupInformation ugi, org.apache.hadoop.fs.FileStatus file, org.apache.hadoop.fs.permission.FsAction action) throws AccessDeniedException
ugi
- the userfile
- the fileaction
- the actionAccessDeniedException
public static void logFileSystemState(org.apache.hadoop.fs.FileSystem fs, org.apache.hadoop.fs.Path root, org.apache.commons.logging.Log LOG) throws IOException
fs
- filesystem to investigateroot
- root file/directory to start logging fromLOG
- log to output informationIOException
- if an unexpected exception occursprivate static void logFSTree(org.apache.commons.logging.Log LOG, org.apache.hadoop.fs.FileSystem fs, org.apache.hadoop.fs.Path root, String prefix) throws IOException
IOException
logFileSystemState(FileSystem, Path, Log)
public static boolean renameAndSetModifyTime(org.apache.hadoop.fs.FileSystem fs, org.apache.hadoop.fs.Path src, org.apache.hadoop.fs.Path dest) throws IOException
IOException
public static Map<String,Map<String,Float>> getRegionDegreeLocalityMappingFromFS(org.apache.hadoop.conf.Configuration conf) throws IOException
RegionPlacementMaintainer
conf
- the configuration to useIOException
- in case of file system errors or interruptspublic static Map<String,Map<String,Float>> getRegionDegreeLocalityMappingFromFS(org.apache.hadoop.conf.Configuration conf, String desiredTable, int threadPoolSize) throws IOException
conf
- the configuration to usedesiredTable
- the table you wish to scan locality forthreadPoolSize
- the thread pool size to useIOException
- in case of file system errors or interruptsprivate static void getRegionLocalityMappingFromFS(org.apache.hadoop.conf.Configuration conf, String desiredTable, int threadPoolSize, Map<String,String> regionToBestLocalityRSMapping, Map<String,Map<String,Float>> regionDegreeLocalityMapping) throws IOException
conf
- the configuration to usedesiredTable
- the table you wish to scan locality forthreadPoolSize
- the thread pool size to useregionToBestLocalityRSMapping
- the map into which to put the best locality mapping or nullregionDegreeLocalityMapping
- the map into which to put the locality degree mapping or null,
must be a thread-safe implementationIOException
- in case of file system errors or interruptspublic static void setupShortCircuitRead(org.apache.hadoop.conf.Configuration conf)
conf
- public static void checkShortCircuitReadBufferSize(org.apache.hadoop.conf.Configuration conf)
conf
- Copyright © 2007–2019 The Apache Software Foundation. All rights reserved.