@InterfaceAudience.Private public class HFileSystem extends org.apache.hadoop.fs.FilterFileSystem
Modifier and Type | Class and Description |
---|---|
(package private) static interface | HFileSystem.ReorderBlocks: Interface to implement to add a specific reordering logic in HDFS. |
(package private) static class | HFileSystem.ReorderWALBlocks: Puts at the lowest priority the WAL file blocks that are on the same datanode as the original regionserver which created these files. |
Modifier and Type | Field and Description |
---|---|
static org.slf4j.Logger | LOG |
private org.apache.hadoop.fs.FileSystem | noChecksumFs |
private static byte | unspecifiedStoragePolicyId |
private boolean | useHBaseChecksum |
Constructor and Description |
---|
HFileSystem(org.apache.hadoop.conf.Configuration conf, boolean useHBaseChecksum): Create a FileSystem object for HBase regionservers. |
HFileSystem(org.apache.hadoop.fs.FileSystem fs): Wrap a FileSystem object within a HFileSystem. |
Modifier and Type | Method and Description |
---|---|
static boolean | addLocationsOrderInterceptor(org.apache.hadoop.conf.Configuration conf) |
(package private) static boolean | addLocationsOrderInterceptor(org.apache.hadoop.conf.Configuration conf, HFileSystem.ReorderBlocks lrb): Add an interceptor on the calls to the namenode#getBlockLocations from the DFSClient linked to this FileSystem. |
void | close(): Close this filesystem object. |
org.apache.hadoop.fs.FSDataOutputStream | createNonRecursive(org.apache.hadoop.fs.Path f, boolean overwrite, int bufferSize, short replication, long blockSize, org.apache.hadoop.util.Progressable progress): The org.apache.hadoop.fs.FilterFileSystem does not yet support createNonRecursive. |
private static org.apache.hadoop.hdfs.protocol.ClientProtocol | createReorderingProxy(org.apache.hadoop.hdfs.protocol.ClientProtocol cp, HFileSystem.ReorderBlocks lrb, org.apache.hadoop.conf.Configuration conf) |
static org.apache.hadoop.fs.FileSystem | get(org.apache.hadoop.conf.Configuration conf): Create a new HFileSystem object, similar to FileSystem.get(). |
org.apache.hadoop.fs.FileSystem | getBackingFs(): Returns the underlying filesystem. |
static org.apache.hadoop.fs.FileSystem | getLocalFs(org.apache.hadoop.conf.Configuration conf): Wrap a LocalFileSystem within a HFileSystem. |
org.apache.hadoop.fs.FileSystem | getNoChecksumFs(): Returns the filesystem that is specially set up for doing reads from storage. |
private String | getStoragePolicyForOldHDFSVersion(org.apache.hadoop.fs.Path path): Before Hadoop 2.8.0 there is no getStoragePolicy method in the FileSystem interface, so we need to stay compatible with it. |
String | getStoragePolicyName(org.apache.hadoop.fs.Path path): Get the storage policy of the source path (directory/file). |
private org.apache.hadoop.fs.FileSystem | maybeWrapFileSystem(org.apache.hadoop.fs.FileSystem base, org.apache.hadoop.conf.Configuration conf): Returns an instance of FileSystem wrapped into the class specified in the hbase.fs.wrapper property if one is set in the configuration; otherwise returns the unmodified FS instance passed in as an argument. |
private static org.apache.hadoop.fs.FileSystem | newInstanceFileSystem(org.apache.hadoop.conf.Configuration conf): Returns a brand new instance of the FileSystem. |
void | setStoragePolicy(org.apache.hadoop.fs.Path path, String policyName): Set the source path (directory/file) to the specified storage policy. |
boolean | useHBaseChecksum(): Are we verifying checksums in HBase? |
Methods inherited from class org.apache.hadoop.fs.FilterFileSystem: access, append, canonicalizeUri, checkPath, completeLocalOutput, concat, copyFromLocalFile, copyFromLocalFile, copyFromLocalFile, copyToLocalFile, create, create, createNonRecursive, createSnapshot, createSymlink, delete, deleteSnapshot, getAclStatus, getCanonicalUri, getChildFileSystems, getConf, getDefaultBlockSize, getDefaultBlockSize, getDefaultReplication, getDefaultReplication, getFileBlockLocations, getFileChecksum, getFileChecksum, getFileLinkStatus, getFileStatus, getHomeDirectory, getInitialWorkingDirectory, getLinkTarget, getRawFileSystem, getServerDefaults, getServerDefaults, getStatus, getUri, getUsed, getWorkingDirectory, getXAttr, getXAttrs, getXAttrs, initialize, listCorruptFileBlocks, listLocatedStatus, listStatus, listStatusIterator, listXAttrs, makeQualified, mkdirs, modifyAclEntries, open, primitiveCreate, primitiveMkdir, removeAcl, removeAclEntries, removeDefaultAcl, removeXAttr, rename, rename, renameSnapshot, resolveLink, resolvePath, setAcl, setOwner, setPermission, setReplication, setTimes, setVerifyChecksum, setWorkingDirectory, setWriteChecksum, setXAttr, setXAttr, startLocalOutput, supportsSymlinks, truncate
Methods inherited from class org.apache.hadoop.fs.FileSystem: addDelegationTokens, append, append, areSymlinksEnabled, cancelDeleteOnExit, clearStatistics, closeAll, closeAllForUGI, copyFromLocalFile, copyToLocalFile, copyToLocalFile, create, create, create, create, create, create, create, create, create, create, create, createNewFile, createNonRecursive, createSnapshot, delete, deleteOnExit, enableSymlinks, exists, fixRelativePart, get, get, getAllStatistics, getBlockSize, getCanonicalServiceName, getContentSummary, getDefaultPort, getDefaultUri, getDelegationToken, getFileBlockLocations, getFileSystemClass, getFSofPath, getLength, getLocal, getName, getNamed, getReplication, getScheme, getStatistics, getStatistics, getStatus, globStatus, globStatus, isDirectory, isFile, listFiles, listLocatedStatus, listStatus, listStatus, listStatus, mkdirs, mkdirs, moveFromLocalFile, moveFromLocalFile, moveToLocalFile, newInstance, newInstance, newInstance, newInstanceLocal, open, primitiveMkdir, printStatistics, processDeleteOnExit, setDefaultUri, setDefaultUri
public static final org.slf4j.Logger LOG
private final org.apache.hadoop.fs.FileSystem noChecksumFs
private final boolean useHBaseChecksum
private static volatile byte unspecifiedStoragePolicyId
public HFileSystem(org.apache.hadoop.conf.Configuration conf, boolean useHBaseChecksum) throws IOException
conf - The configuration to be used for the filesystem
useHBaseChecksum - if true, then use checksum verification in HBase; otherwise delegate checksum verification to the FileSystem.
Throws: IOException
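A minimal usage sketch for this constructor, assuming the standard HBase client classes (org.apache.hadoop.hbase.HBaseConfiguration and this class in org.apache.hadoop.hbase.fs) are on the classpath; it is an illustration, not part of the API above:

```java
import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.fs.HFileSystem;

public class HFileSystemChecksumExample {
  public static void main(String[] args) throws IOException {
    Configuration conf = HBaseConfiguration.create();
    // true: HBase verifies checksums itself rather than delegating to the FileSystem.
    HFileSystem hfs = new HFileSystem(conf, true);
    try {
      System.out.println("HBase checksum verification: " + hfs.useHBaseChecksum());
    } finally {
      hfs.close();
    }
  }
}
```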
public HFileSystem(org.apache.hadoop.fs.FileSystem fs)
fs - Set the noChecksumFs and writeFs to this specified filesystem.

public org.apache.hadoop.fs.FileSystem getNoChecksumFs()
Returns the filesystem that is specially set up for doing reads from storage.

public org.apache.hadoop.fs.FileSystem getBackingFs() throws IOException
Returns the underlying filesystem.
Throws: IOException
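A minimal sketch of wrapping an existing FileSystem and picking the read-side and write-side views, under the same assumptions as the sketch above:

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.fs.HFileSystem;

public class WrapFileSystemExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    FileSystem raw = FileSystem.get(conf);

    // Wrap an existing FileSystem; reads can go through the checksum-less
    // view while writes keep using the backing filesystem.
    HFileSystem hfs = new HFileSystem(raw);
    FileSystem readFs = hfs.getNoChecksumFs();
    FileSystem writeFs = hfs.getBackingFs();

    System.out.println("read fs:  " + readFs.getUri());
    System.out.println("write fs: " + writeFs.getUri());
  }
}
```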
public void setStoragePolicy(org.apache.hadoop.fs.Path path, String policyName)
path - The source path (directory/file).
policyName - The name of the storage policy: 'HOT', 'COLD', etc. See Hadoop 2.6+ org.apache.hadoop.hdfs.protocol.HdfsConstants for the possible list, e.g. 'COLD', 'WARM', 'HOT', 'ONE_SSD', 'ALL_SSD', 'LAZY_PERSIST'.
@Nullable public String getStoragePolicyName(org.apache.hadoop.fs.Path path)
Get the storage policy of the source path (directory/file).
path - The source path (directory/file).
Returns: the storage policy name, or null if not using DistributedFileSystem or if an exception was thrown when trying to get the policy.
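A minimal sketch of setting and reading back a storage policy; the directory path is illustrative and the call only has an effect on an HDFS-backed deployment:

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.fs.HFileSystem;

public class StoragePolicyExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    HFileSystem hfs = new HFileSystem(conf, true);

    // Illustrative path; storage policies only take effect on HDFS.
    Path dir = new Path("/hbase/data/default/example-table");

    // Ask HDFS to keep one replica of this directory's blocks on SSD.
    hfs.setStoragePolicy(dir, "ONE_SSD");

    // Returns null when the backing filesystem is not DistributedFileSystem
    // or when the policy cannot be read.
    String policy = hfs.getStoragePolicyName(dir);
    System.out.println("policy: " + policy);

    hfs.close();
  }
}
```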
path - Path to get storage policy against

public boolean useHBaseChecksum()
Are we verifying checksums in HBase?
public void close() throws IOException
Close this filesystem object.
Specified by: close in interface Closeable
Specified by: close in interface AutoCloseable
Overrides: close in class org.apache.hadoop.fs.FilterFileSystem
Throws: IOException
private static org.apache.hadoop.fs.FileSystem newInstanceFileSystem(org.apache.hadoop.conf.Configuration conf) throws IOException
conf - Configuration
Throws: IOException
private org.apache.hadoop.fs.FileSystem maybeWrapFileSystem(org.apache.hadoop.fs.FileSystem base, org.apache.hadoop.conf.Configuration conf)
base - Filesystem instance to wrap
conf - Configuration
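maybeWrapFileSystem is private, but the hbase.fs.wrapper property it reads is ordinary configuration. A sketch of setting it, where com.example.AuditingFileSystem is a hypothetical wrapper class and not part of HBase:

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class FsWrapperConfig {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Hypothetical wrapper class; when hbase.fs.wrapper is set, HFileSystem
    // wraps the filesystems it creates in the named class.
    conf.set("hbase.fs.wrapper", "com.example.AuditingFileSystem");
  }
}
```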
public static boolean addLocationsOrderInterceptor(org.apache.hadoop.conf.Configuration conf) throws IOException
Throws: IOException
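A minimal sketch of installing the block-location reordering interceptor; the return value indicates whether it could actually be added for the DFSClient behind this configuration:

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.fs.HFileSystem;

public class ReorderInterceptorExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // Intercepts namenode#getBlockLocations calls made by the DFSClient
    // linked to this configuration; returns false if it could not be added.
    boolean added = HFileSystem.addLocationsOrderInterceptor(conf);
    System.out.println("reorder interceptor installed: " + added);
  }
}
```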
static boolean addLocationsOrderInterceptor(org.apache.hadoop.conf.Configuration conf, HFileSystem.ReorderBlocks lrb)
private static org.apache.hadoop.hdfs.protocol.ClientProtocol createReorderingProxy(org.apache.hadoop.hdfs.protocol.ClientProtocol cp, HFileSystem.ReorderBlocks lrb, org.apache.hadoop.conf.Configuration conf)
public static org.apache.hadoop.fs.FileSystem get(org.apache.hadoop.conf.Configuration conf) throws IOException
Create a new HFileSystem object, similar to FileSystem.get().
Throws: IOException
public static org.apache.hadoop.fs.FileSystem getLocalFs(org.apache.hadoop.conf.Configuration conf) throws IOException
Wrap a LocalFileSystem within a HFileSystem.
Throws: IOException
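A minimal sketch of the two static factories:

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.fs.HFileSystem;

public class FactoryMethodsExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();

    // Like FileSystem.get(conf), but the returned instance is an HFileSystem.
    FileSystem fs = HFileSystem.get(conf);

    // A LocalFileSystem wrapped within an HFileSystem.
    FileSystem localFs = HFileSystem.getLocalFs(conf);

    System.out.println(fs.getUri());
    System.out.println(localFs.getUri());
  }
}
```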
public org.apache.hadoop.fs.FSDataOutputStream createNonRecursive(org.apache.hadoop.fs.Path f, boolean overwrite, int bufferSize, short replication, long blockSize, org.apache.hadoop.util.Progressable progress) throws IOException
The org.apache.hadoop.fs.FilterFileSystem does not yet support createNonRecursive.
Overrides: createNonRecursive in class org.apache.hadoop.fs.FileSystem
Throws: IOException
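A minimal sketch of createNonRecursive; the path, buffer size, replication, and block size are illustrative, and unlike create() the parent directory must already exist:

```java
import java.nio.charset.StandardCharsets;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.fs.HFileSystem;

public class CreateNonRecursiveExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    HFileSystem hfs = new HFileSystem(conf, true);

    // createNonRecursive does not create missing parent directories,
    // so /tmp/hfilesystem-example must already exist.
    Path file = new Path("/tmp/hfilesystem-example/part-0");
    try (FSDataOutputStream out =
        hfs.createNonRecursive(file, true, 4096, (short) 1, 64L * 1024 * 1024, null)) {
      out.write("hello".getBytes(StandardCharsets.UTF_8));
    } finally {
      hfs.close();
    }
  }
}
```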