/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.fs;

import static org.apache.hadoop.hbase.util.LocatedBlockHelper.getLocatedBlockLocations;

import edu.umd.cs.findbugs.annotations.Nullable;
import java.io.Closeable;
import java.io.IOException;
import java.lang.reflect.Field;
import java.lang.reflect.InvocationHandler;
import java.lang.reflect.InvocationTargetException;
import java.lang.reflect.Method;
import java.lang.reflect.Modifier;
import java.lang.reflect.Proxy;
import java.lang.reflect.UndeclaredThrowableException;
import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.FilterFileSystem;
import org.apache.hadoop.fs.LocalFileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.util.CommonFSUtils;
import org.apache.hadoop.hbase.util.ReflectionUtils;
import org.apache.hadoop.hbase.wal.AbstractFSWALProvider;
import org.apache.hadoop.hdfs.DFSClient;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.protocol.BlockStoragePolicy;
import org.apache.hadoop.hdfs.protocol.ClientProtocol;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
import org.apache.hadoop.hdfs.protocol.LocatedBlock;
import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockStoragePolicySuite;
import org.apache.hadoop.ipc.RPC;
import org.apache.hadoop.util.Progressable;
import org.apache.yetus.audience.InterfaceAudience;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

/**
 * An encapsulation for the FileSystem object that hbase uses to access data. This class allows the
 * flexibility of using separate filesystem objects for reading and writing hfiles and wals.
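 * <p>
 * A minimal usage sketch (illustrative; the flag value here is an assumption):
 *
 * <pre>{@code
 * Configuration conf = HBaseConfiguration.create();
 * HFileSystem hfs = new HFileSystem(conf, true); // verify checksums inside HBase
 * FileSystem readFs = hfs.getNoChecksumFs();     // reads skip filesystem-level checksums
 * }</pre>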
 */
@InterfaceAudience.Private
public class HFileSystem extends FilterFileSystem {
  public static final Logger LOG = LoggerFactory.getLogger(HFileSystem.class);

  private final FileSystem noChecksumFs; // read hfile data from storage
  private final boolean useHBaseChecksum;
  private static volatile byte unspecifiedStoragePolicyId = Byte.MIN_VALUE;

  /**
   * Create a FileSystem object for HBase regionservers.
   * @param conf             The configuration to be used for the filesystem
   * @param useHBaseChecksum if true, then use checksum verification in hbase, otherwise delegate
   *                         checksum verification to the FileSystem.
   */
  public HFileSystem(Configuration conf, boolean useHBaseChecksum) throws IOException {

    // Create the default filesystem with checksum verification switched on.
    // By default, any operation to this FilterFileSystem occurs on
    // the underlying filesystem that has checksums switched on.
    // FileSystem.get(URI, conf) documents that it returns the already-created FS
    // for the URI if one exists, and otherwise creates, initializes and returns a new one.
    // We take pains to funnel all of our FileSystem instantiation through this call to ensure
    // we never need to call FS.initialize ourselves, so that we do not have to track any state
    // to avoid calling initialize more than once.
    this.fs = FileSystem.get(getDefaultUri(conf), conf);
    this.useHBaseChecksum = useHBaseChecksum;

    // disable checksum verification for the local filesystem, see HBASE-11218
    if (fs instanceof LocalFileSystem) {
      fs.setWriteChecksum(false);
      fs.setVerifyChecksum(false);
    }

    addLocationsOrderInterceptor(conf);

    // If hbase checksum verification is switched on, then create a new
    // filesystem object that has checksum verification turned off.
    // We will avoid verifying checksums in the fs client, and instead do it
    // inside hbase.
    // If this is the local filesystem, hadoop has a bug where seeks
    // do not go to the correct location after setVerifyChecksum(false) is called.
    // This manifests itself as incorrect data being read, so that HFileBlocks cannot
    // read their header magic numbers. See HBASE-5885.
    if (useHBaseChecksum && !(fs instanceof LocalFileSystem)) {
      conf = new Configuration(conf);
      conf.setBoolean("dfs.client.read.shortcircuit.skip.checksum", true);
      this.noChecksumFs = maybeWrapFileSystem(newInstanceFileSystem(conf), conf);
      this.noChecksumFs.setVerifyChecksum(false);
    } else {
      this.noChecksumFs = maybeWrapFileSystem(fs, conf);
    }

    this.fs = maybeWrapFileSystem(this.fs, conf);
  }

  /**
   * Wrap a FileSystem object within a HFileSystem. The noChecksumFs and writeFs are both set to
   * the same specified fs. Does not verify hbase-checksums while reading data from the filesystem.
   * @param fs Set the noChecksumFs and writeFs to this specified filesystem.
   */
  public HFileSystem(FileSystem fs) {
    this.fs = fs;
    this.noChecksumFs = fs;
    this.useHBaseChecksum = false;
  }

  /**
   * Returns the filesystem that is specially setup for doing reads from storage. This object
   * avoids doing checksum verifications for reads.
   * @return The FileSystem object that can be used to read data from files.
   */
  public FileSystem getNoChecksumFs() {
    return noChecksumFs;
  }

  /**
   * Returns the underlying filesystem.
   * @return The underlying FileSystem for this FilterFileSystem object.
   */
  public FileSystem getBackingFs() throws IOException {
    return fs;
  }

  /**
   * Set the source path (directory/file) to the specified storage policy.
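   * <p>
   * A minimal usage sketch (the path below is hypothetical):
   *
   * <pre>{@code
   * hfs.setStoragePolicy(new Path("/hbase/data/default/t1"), "ONE_SSD");
   * }</pre>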
   * @param path       The source path (directory/file).
   * @param policyName The name of the storage policy. See
   *                   org.apache.hadoop.hdfs.protocol.HdfsConstants in hadoop 2.6+ for the
   *                   possible values, e.g. 'COLD', 'WARM', 'HOT', 'ONE_SSD', 'ALL_SSD',
   *                   'LAZY_PERSIST'.
   */
  public void setStoragePolicy(Path path, String policyName) {
    CommonFSUtils.setStoragePolicy(this.fs, path, policyName);
  }

  /**
   * Get the storage policy of the source path (directory/file).
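   * <p>
   * For example (the path below is hypothetical):
   *
   * <pre>{@code
   * String policy = hfs.getStoragePolicyName(new Path("/hbase/data/default/t1"));
   * }</pre>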
   * @param path The source path (directory/file).
   * @return Storage policy name, or {@code null} if the filesystem is not a
   *         {@link DistributedFileSystem} or an exception was thrown when fetching the policy
   */
  @Nullable
  public String getStoragePolicyName(Path path) {
    try {
      Object blockStoragePolicySpi =
        ReflectionUtils.invokeMethod(this.fs, "getStoragePolicy", path);
      return (String) ReflectionUtils.invokeMethod(blockStoragePolicySpi, "getName");
    } catch (Exception e) {
      // May fail because of an old HDFS version, so try the old way
      if (LOG.isTraceEnabled()) {
        LOG.trace("Failed to get policy directly", e);
      }
      return getStoragePolicyForOldHDFSVersion(path);
    }
  }

  /**
   * Before Hadoop 2.8.0, there is no getStoragePolicy method on the FileSystem interface, and we
   * need to stay compatible with that. See HADOOP-12161 for more details.
   * @param path Path to get storage policy against
   * @return the storage policy name
   */
  private String getStoragePolicyForOldHDFSVersion(Path path) {
    try {
      if (this.fs instanceof DistributedFileSystem) {
        DistributedFileSystem dfs = (DistributedFileSystem) this.fs;
        HdfsFileStatus status = dfs.getClient().getFileInfo(path.toUri().getPath());
        if (null != status) {
          if (unspecifiedStoragePolicyId < 0) {
            // Get the unspecified id field through reflection to avoid a compilation error.
            // In later versions BlockStoragePolicySuite#ID_UNSPECIFIED is moved to
            // HdfsConstants#BLOCK_STORAGE_POLICY_ID_UNSPECIFIED
            Field idUnspecified = BlockStoragePolicySuite.class.getField("ID_UNSPECIFIED");
            unspecifiedStoragePolicyId = idUnspecified.getByte(BlockStoragePolicySuite.class);
          }
          byte storagePolicyId = status.getStoragePolicy();
          if (storagePolicyId != unspecifiedStoragePolicyId) {
            BlockStoragePolicy[] policies = dfs.getStoragePolicies();
            for (BlockStoragePolicy policy : policies) {
              if (policy.getId() == storagePolicyId) {
                return policy.getName();
              }
            }
          }
        }
      }
    } catch (Throwable e) {
      LOG.warn("Failed to get block storage policy of [" + path + "]", e);
    }

    return null;
  }

  /**
   * Are we verifying checksums in HBase?
   * @return True, if hbase is configured to verify checksums, otherwise false.
   */
  public boolean useHBaseChecksum() {
    return useHBaseChecksum;
  }

  /**
   * Close this filesystem object.
   */
  @Override
  public void close() throws IOException {
    super.close();
    if (this.noChecksumFs != fs) {
      this.noChecksumFs.close();
    }
  }

  /**
   * Returns a brand new instance of the FileSystem. It does not use the FileSystem.Cache. In newer
   * versions of HDFS, we can directly invoke FileSystem.newInstance(Configuration).
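   * <p>
   * A sketch of the equivalent cache-bypassing call available on newer Hadoop:
   *
   * <pre>{@code
   * FileSystem fs = FileSystem.newInstance(FileSystem.getDefaultUri(conf), conf);
   * }</pre>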
   * @param conf Configuration
   * @return A new instance of the filesystem
   */
  private static FileSystem newInstanceFileSystem(Configuration conf) throws IOException {
    URI uri = FileSystem.getDefaultUri(conf);
    FileSystem fs = null;
    Class<?> clazz = conf.getClass("fs." + uri.getScheme() + ".impl", null);
    if (clazz != null) {
      // This will be true for Hadoop 1.0, or 0.20.
      fs = (FileSystem) org.apache.hadoop.util.ReflectionUtils.newInstance(clazz, conf);
      fs.initialize(uri, conf);
    } else {
      // For Hadoop 2.0, we have to go through FileSystem for the filesystem
      // implementation to be loaded by the service loader in case it has not
      // been loaded yet.
      Configuration clone = new Configuration(conf);
      clone.setBoolean("fs." + uri.getScheme() + ".impl.disable.cache", true);
      fs = FileSystem.get(uri, clone);
    }
    if (fs == null) {
      throw new IOException("No FileSystem for scheme: " + uri.getScheme());
    }

    return fs;
  }

  /**
   * Returns an instance of the filesystem wrapped into the class specified by the
   * {@code hbase.fs.wrapper} property, if one is set in the configuration; otherwise returns the
   * FS instance passed in as an argument, unmodified.
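   * <p>
   * A hypothetical wrapper sketch; whatever class is named in {@code hbase.fs.wrapper} must
   * expose a {@code (FileSystem, Configuration)} constructor:
   *
   * <pre>{@code
   * public class AuditingFileSystem extends FilterFileSystem {
   *   public AuditingFileSystem(FileSystem fs, Configuration conf) {
   *     super(fs); // delegate everything; add auditing hooks as needed
   *   }
   * }
   * }</pre>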
   * @param base Filesystem instance to wrap
   * @param conf Configuration
   * @return wrapped instance of FS, or the same instance if no wrapping configured.
   */
  private FileSystem maybeWrapFileSystem(FileSystem base, Configuration conf) {
    try {
      Class<?> clazz = conf.getClass("hbase.fs.wrapper", null);
      if (clazz != null) {
        return (FileSystem) clazz.getConstructor(FileSystem.class, Configuration.class)
          .newInstance(base, conf);
      }
    } catch (Exception e) {
      LOG.error("Failed to wrap filesystem", e);
    }
    return base;
  }

  public static boolean addLocationsOrderInterceptor(Configuration conf) throws IOException {
    return addLocationsOrderInterceptor(conf, new ReorderWALBlocks());
  }

  /**
   * Add an interceptor on the calls to the namenode#getBlockLocations from the DFSClient linked to
   * this FileSystem. See HBASE-6435 for the background.
   * <p/>
   * There should be no reason, except testing, to create a specific ReorderBlocks.
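   * <p>
   * The interceptor is on by default; it can be switched off in the configuration:
   *
   * <pre>{@code
   * conf.setBoolean("hbase.filesystem.reorder.blocks", false); // disable reordering
   * }</pre>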
   * @return true if the interceptor was added, false otherwise.
   */
  static boolean addLocationsOrderInterceptor(Configuration conf, final ReorderBlocks lrb) {
    if (!conf.getBoolean("hbase.filesystem.reorder.blocks", true)) { // activated by default
      LOG.debug("addLocationsOrderInterceptor configured to false");
      return false;
    }

    FileSystem fs;
    try {
      fs = FileSystem.get(conf);
    } catch (IOException e) {
      LOG.warn("Can't get the file system from the conf.", e);
      return false;
    }

    if (!(fs instanceof DistributedFileSystem)) {
      LOG.debug("The file system is not a DistributedFileSystem. "
        + "Skipping on block location reordering");
      return false;
    }

    DistributedFileSystem dfs = (DistributedFileSystem) fs;
    DFSClient dfsc = dfs.getClient();
    if (dfsc == null) {
      LOG.warn("The DistributedFileSystem does not contain a DFSClient. Can't add the location "
        + "block reordering interceptor. Continuing, but this is unexpected.");
      return false;
    }

    try {
      Field nf = DFSClient.class.getDeclaredField("namenode");
      nf.setAccessible(true);
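      // Clear the FINAL modifier from DFSClient#namenode so the field can be swapped
      // for the reordering proxy built below.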
      Field modifiersField = ReflectionUtils.getModifiersField();
      modifiersField.setAccessible(true);
      modifiersField.setInt(nf, nf.getModifiers() & ~Modifier.FINAL);

      ClientProtocol namenode = (ClientProtocol) nf.get(dfsc);
      if (namenode == null) {
        LOG.warn("The DFSClient is not linked to a namenode. Can't add the location block"
          + " reordering interceptor. Continuing, but this is unexpected.");
        return false;
      }

      ClientProtocol cp1 = createReorderingProxy(namenode, lrb, conf);
      nf.set(dfsc, cp1);
      LOG.info("Added intercepting call to namenode#getBlockLocations so we can do block"
        + " reordering using class " + lrb.getClass().getName());
    } catch (NoSuchFieldException | IllegalAccessException e) {
      LOG.warn("Can't modify the DFSClient#namenode field to add the location reorder.", e);
      return false;
    }

    return true;
  }

  private static ClientProtocol createReorderingProxy(final ClientProtocol cp,
    final ReorderBlocks lrb, final Configuration conf) {
    return (ClientProtocol) Proxy.newProxyInstance(cp.getClass().getClassLoader(),
      new Class[] { ClientProtocol.class, Closeable.class }, new InvocationHandler() {
        @Override
        public Object invoke(Object proxy, Method method, Object[] args) throws Throwable {
          try {
            if ((args == null || args.length == 0) && "close".equals(method.getName())) {
              RPC.stopProxy(cp);
              return null;
            } else {
              Object res = method.invoke(cp, args);
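              // On a successful getBlockLocations(src, offset, length) call, hand the
              // result to the ReorderBlocks hook so it can rewrite the replica order.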
              if (
                res != null && args != null && args.length == 3
                  && "getBlockLocations".equals(method.getName()) && res instanceof LocatedBlocks
                  && args[0] instanceof String
              ) {
                lrb.reorderBlocks(conf, (LocatedBlocks) res, (String) args[0]);
              }
              return res;
            }
          } catch (InvocationTargetException ite) {
            // We will get this for all exceptions, checked or not, thrown
            // by any layer, including functional exceptions
            Throwable cause = ite.getCause();
            if (cause == null) {
              throw new RuntimeException("Proxy invocation failed and getCause is null", ite);
            }
            if (cause instanceof UndeclaredThrowableException) {
              Throwable causeCause = cause.getCause();
              if (causeCause == null) {
                throw new RuntimeException("UndeclaredThrowableException had null cause!");
              }
              cause = causeCause;
            }
            throw cause;
          }
        }
      });
  }

  /**
   * Interface to implement to add a specific reordering logic in hdfs.
   */
  interface ReorderBlocks {
    /**
     * @param conf the conf to use
     * @param lbs  the LocatedBlocks to reorder
     * @param src  the file name currently read
     * @throws IOException if something went wrong
     */
    void reorderBlocks(Configuration conf, LocatedBlocks lbs, String src) throws IOException;
  }

  /**
   * We put at lowest priority the wal files blocks that are on the same datanode as the original
   * regionserver which created these files. This is because we fear that the datanode is actually
   * dead, so if we use it, it will time out.
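   * <p>
   * For example, if the WAL was written by a server on {@code host1}, a replica order of
   * {@code [host1, host2, host3]} becomes {@code [host2, host3, host1]}.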
   */
  static class ReorderWALBlocks implements ReorderBlocks {
    @Override
    public void reorderBlocks(Configuration conf, LocatedBlocks lbs, String src)
      throws IOException {

      ServerName sn = AbstractFSWALProvider.getServerNameFromWALDirectoryName(conf, src);
      if (sn == null) {
        // It's not a WAL
        return;
      }

      // OK, so it's a WAL
      String hostName = sn.getHostname();
      if (LOG.isTraceEnabled()) {
        LOG.trace(src + " is a WAL file, so reordering blocks; the last hostname will be: "
          + hostName);
      }

      // Go through all the blocks
      for (LocatedBlock lb : lbs.getLocatedBlocks()) {
        DatanodeInfo[] dnis = getLocatedBlockLocations(lb);
        if (dnis != null && dnis.length > 1) {
          boolean found = false;
          for (int i = 0; i < dnis.length - 1 && !found; i++) {
            if (hostName.equals(dnis[i].getHostName())) {
              // advance the other locations by one and put this one at the last place.
              DatanodeInfo toLast = dnis[i];
              System.arraycopy(dnis, i + 1, dnis, i, dnis.length - i - 1);
              dnis[dnis.length - 1] = toLast;
              found = true;
            }
          }
        }
      }
    }
  }

  /**
   * Create a new HFileSystem object, similar to FileSystem.get(). This returns a filesystem object
   * that avoids checksum verification in the filesystem for hfileblock-reads. For these blocks,
   * checksum verification is done by HBase.
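   * <p>
   * A minimal usage sketch:
   *
   * <pre>{@code
   * FileSystem fs = HFileSystem.get(conf); // checksums verified by HBase, not the fs
   * }</pre>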
   */
  public static FileSystem get(Configuration conf) throws IOException {
    return new HFileSystem(conf, true);
  }

  /**
   * The org.apache.hadoop.fs.FilterFileSystem does not yet support createNonRecursive. This is a
   * Hadoop bug; once it is fixed in Hadoop, this override can go away.
   */
  @Override
  @SuppressWarnings("deprecation")
  public FSDataOutputStream createNonRecursive(Path f, boolean overwrite, int bufferSize,
    short replication, long blockSize, Progressable progress) throws IOException {
    return fs.createNonRecursive(f, overwrite, bufferSize, replication, blockSize, progress);
  }
}