/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.hadoop.hbase.util;

import java.io.FileNotFoundException;
import java.io.IOException;
import java.io.InterruptedIOException;
import java.lang.reflect.Method;
import java.net.InetSocketAddress;
import java.net.URI;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;

import com.google.common.collect.Sets;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.server.namenode.LeaseExpiredException;

/**
 * HDFS implementation of {@link FSUtils}.
 */
public class FSHDFSUtils extends FSUtils {
  private static final Log LOG = LogFactory.getLog(FSHDFSUtils.class);
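  // These two fields cache the reflective lookup of DFSUtil#getNNServiceRpcAddresses,
  // resolved lazily in getNNAddresses(); reflection is used rather than a direct call,
  // presumably so this class still loads on Hadoop versions that lack the method.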
  private static Class<?> dfsUtilClazz;
  private static Method getNNAddressesMethod;

  /**
   * @param fs the filesystem whose namenode addresses we want
   * @param conf configuration used to resolve HA namenode addresses
   * @return A set containing all the namenode addresses of fs
   */
  private static Set<InetSocketAddress> getNNAddresses(DistributedFileSystem fs,
                                                      Configuration conf) {
    Set<InetSocketAddress> addresses = new HashSet<InetSocketAddress>();
    String serviceName = fs.getCanonicalServiceName();

    if (serviceName.startsWith("ha-hdfs")) {
      try {
        if (dfsUtilClazz == null) {
          dfsUtilClazz = Class.forName("org.apache.hadoop.hdfs.DFSUtil");
        }
        if (getNNAddressesMethod == null) {
          getNNAddressesMethod =
                  dfsUtilClazz.getMethod("getNNServiceRpcAddresses", Configuration.class);
        }

        Map<String, Map<String, InetSocketAddress>> addressMap =
                (Map<String, Map<String, InetSocketAddress>>) getNNAddressesMethod
                        .invoke(null, conf);
        for (Map<String, InetSocketAddress> nnMap : addressMap.values()) {
          addresses.addAll(nnMap.values());
        }
      } catch (Exception e) {
        LOG.warn("DFSUtil.getNNServiceRpcAddresses failed. serviceName=" + serviceName, e);
      }
    } else {
      URI uri = fs.getUri();
      addresses.add(new InetSocketAddress(uri.getHost(), uri.getPort()));
    }

    return addresses;
  }

  /**
   * @param conf the Configuration of HBase
   * @param srcFs the source filesystem
   * @param desFs the destination filesystem
   * @return Whether srcFs and desFs point at the same hdfs or not
   */
  public static boolean isSameHdfs(Configuration conf, FileSystem srcFs, FileSystem desFs) {
    // getCanonicalServiceName gives both srcFs and desFs in a uniform format
    // containing scheme, host and port.
    String srcServiceName = srcFs.getCanonicalServiceName();
    String desServiceName = desFs.getCanonicalServiceName();

    if (srcServiceName == null || desServiceName == null) {
      return false;
    }
    if (srcServiceName.equals(desServiceName)) {
      return true;
    }
    if (srcFs instanceof DistributedFileSystem && desFs instanceof DistributedFileSystem) {
      // If one serviceName is in HA format while the other is not, they may still
      // refer to the same FileSystem.
      // For example, srcFs is "ha-hdfs://nameservices" and desFs is "hdfs://activeNamenode:port".
      Set<InetSocketAddress> srcAddrs = getNNAddresses((DistributedFileSystem) srcFs, conf);
      Set<InetSocketAddress> desAddrs = getNNAddresses((DistributedFileSystem) desFs, conf);
      if (!Sets.intersection(srcAddrs, desAddrs).isEmpty()) {
        return true;
      }
    }

    return false;
  }
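
  // A minimal usage sketch (hypothetical URIs; assumes HA is configured for
  // "nameservice1" and that activeNN:8020 is one of its namenode RPC addresses):
  //   FileSystem src = FileSystem.get(URI.create("hdfs://nameservice1"), conf);
  //   FileSystem des = FileSystem.get(URI.create("hdfs://activeNN:8020"), conf);
  //   boolean same = isSameHdfs(conf, src, des); // true: the NN address sets intersect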

  /**
   * Recover the lease from HDFS, retrying multiple times.
   */
  @Override
  public void recoverFileLease(final FileSystem fs, final Path p,
      Configuration conf)
  throws IOException {
    if (!isAppendSupported(conf)) {
      LOG.warn("Running on HDFS without append enabled may result in data loss");
      return;
    }
    // lease recovery not needed for local file system case.
    if (!(fs instanceof DistributedFileSystem)) return;
    recoverDFSFileLease((DistributedFileSystem)fs, p, conf);
  }

  /*
   * Run the dfs recover lease. recoverLease is asynchronous. It returns:
   *    - false when it starts the lease recovery (i.e. the lease recovery is not *yet* done)
   *    - true when the lease recovery has succeeded or the file is closed.
   * But, we have to be careful.  Each time we call recoverLease, it starts the recover lease
   * process over from the beginning.  We could put ourselves in a situation where we are
   * doing nothing but starting a recovery, interrupting it to start again, and so on.
   * The findings over in HBASE-8354 have it that the namenode will try to recover the lease
   * on the file's primary node.  If all is well, it should return near immediately.  But,
   * as is common, it is the very primary node that has crashed and so the namenode will be
   * stuck waiting on a socket timeout before it will ask another datanode to start the
   * recovery. It does not help if we call recoverLease in the meantime; in particular,
   * subsequent to the socket timeout, a recoverLease invocation will cause us to start
   * over from square one (possibly waiting on a socket timeout against the primary node).
   * So, in the below, we do the following:
   * 1. Call recoverLease.
   * 2. If it returns true, break.
   * 3. If it returns false, wait a few seconds and then call it again.
   * 4. If it returns true, break.
   * 5. If it returns false, wait for what we think the datanode socket timeout is
   *    (configurable) and then try again.
   * 6. If it returns true, break.
   * 7. If it returns false, repeat starting at step 5 above.
   *
   * If HDFS-4525 is available, call it every second, and we might be able to exit early.
   */
  boolean recoverDFSFileLease(final DistributedFileSystem dfs, final Path p,
      final Configuration conf)
  throws IOException {
    LOG.info("Recovering lease on dfs file " + p);
    long startWaiting = EnvironmentEdgeManager.currentTimeMillis();
    // Default is 15 minutes. It's huge, but the idea is that if we have a major issue, HDFS
    // usually needs 10 minutes before marking the nodes as dead. So we're putting ourselves
    // beyond that limit 'to be safe'.
    long recoveryTimeout = conf.getInt("hbase.lease.recovery.timeout", 900000) + startWaiting;
    // This setting should be what the cluster dfs heartbeat is set to.
    long firstPause = conf.getInt("hbase.lease.recovery.first.pause", 3000);
    // This should be set to how long it'll take for us to time out against the primary datanode
    // if it is dead.  We set it to 61 seconds, 1 second more than the default READ_TIMEOUT in
    // HDFS, the default value for DFS_CLIENT_SOCKET_TIMEOUT_KEY.
    long subsequentPause = conf.getInt("hbase.lease.recovery.dfs.timeout", 61 * 1000);

    Method isFileClosedMeth = null;
    // whether we need to look for the isFileClosed method
    boolean findIsFileClosedMeth = true;
    boolean recovered = false;
    // We break the loop if the lease recovery succeeds, we time out, or we throw an exception.
    for (int nbAttempt = 0; !recovered; nbAttempt++) {
      recovered = recoverLease(dfs, nbAttempt, p, startWaiting);
      if (recovered) break;
      if (checkIfTimedout(conf, recoveryTimeout, nbAttempt, p, startWaiting)) break;
      try {
        // On the first time through, wait the short 'firstPause'.
        if (nbAttempt == 0) {
          Thread.sleep(firstPause);
        } else {
          // Cycle here until subsequentPause elapses.  While spinning, check isFileClosed if
          // available (should be in hadoop 2.0.5... not in hadoop 1 though).
          long localStartWaiting = EnvironmentEdgeManager.currentTimeMillis();
          while ((EnvironmentEdgeManager.currentTimeMillis() - localStartWaiting) <
              subsequentPause) {
            Thread.sleep(conf.getInt("hbase.lease.recovery.pause", 1000));
            if (findIsFileClosedMeth) {
              try {
                isFileClosedMeth = dfs.getClass().getMethod("isFileClosed",
                  new Class[]{ Path.class });
              } catch (NoSuchMethodException nsme) {
                LOG.debug("isFileClosed not available");
              } finally {
                findIsFileClosedMeth = false;
              }
            }
            if (isFileClosedMeth != null && isFileClosed(dfs, isFileClosedMeth, p)) {
              recovered = true;
              break;
            }
          }
        }
      } catch (InterruptedException ie) {
        InterruptedIOException iioe = new InterruptedIOException();
        iioe.initCause(ie);
        throw iioe;
      }
    }
    return recovered;
  }
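
  // With the defaults above, the retry timeline is roughly: attempt 0, then a 3s pause
  // (hbase.lease.recovery.first.pause); on later attempts, poll isFileClosed every 1s
  // (hbase.lease.recovery.pause) for up to 61s (hbase.lease.recovery.dfs.timeout) before
  // the next recoverLease call; give up after 900s total (hbase.lease.recovery.timeout).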

  boolean checkIfTimedout(final Configuration conf, final long recoveryTimeout,
      final int nbAttempt, final Path p, final long startWaiting) {
    if (recoveryTimeout < EnvironmentEdgeManager.currentTimeMillis()) {
      LOG.warn("Cannot recoverLease after trying for " +
        conf.getInt("hbase.lease.recovery.timeout", 900000) +
        "ms (hbase.lease.recovery.timeout); continuing, but may be DATALOSS!!!; " +
        getLogMessageDetail(nbAttempt, p, startWaiting));
      return true;
    }
    return false;
  }

  /**
   * Try to recover the lease.
   * @param dfs the filesystem on which to recover the lease
   * @param nbAttempt the attempt number, for logging
   * @param p path of the file whose lease we are recovering
   * @param startWaiting timestamp at which recovery started, for logging
   * @return True if dfs#recoverLease came back true.
   * @throws FileNotFoundException if the file to recover does not exist
   */
  boolean recoverLease(final DistributedFileSystem dfs, final int nbAttempt, final Path p,
      final long startWaiting)
  throws FileNotFoundException {
    boolean recovered = false;
    try {
      recovered = dfs.recoverLease(p);
      LOG.info("recoverLease=" + recovered + ", " +
        getLogMessageDetail(nbAttempt, p, startWaiting));
    } catch (IOException e) {
      if (e instanceof LeaseExpiredException && e.getMessage().contains("File does not exist")) {
        // This exception comes out instead of FNFE; fix it.
        throw new FileNotFoundException("The given HLog wasn't found at " + p);
      } else if (e instanceof FileNotFoundException) {
        throw (FileNotFoundException)e;
      }
      LOG.warn(getLogMessageDetail(nbAttempt, p, startWaiting), e);
    }
    return recovered;
  }

  /**
   * @param nbAttempt the attempt number, for logging
   * @param p path of the file under recovery
   * @param startWaiting timestamp at which recovery started
   * @return Detail to append to any log message around lease recovering.
   */
  private String getLogMessageDetail(final int nbAttempt, final Path p, final long startWaiting) {
    return "attempt=" + nbAttempt + " on file=" + p + " after " +
      (EnvironmentEdgeManager.currentTimeMillis() - startWaiting) + "ms";
  }

  /**
   * Call HDFS-4525 isFileClosed if it is available.
   * @param dfs the filesystem to query
   * @param m the reflected isFileClosed method
   * @param p path of the file to check
   * @return True if file is closed.
   */
  private boolean isFileClosed(final DistributedFileSystem dfs, final Method m, final Path p) {
    try {
      return (Boolean) m.invoke(dfs, p);
    } catch (SecurityException e) {
      LOG.warn("No access", e);
    } catch (Exception e) {
      LOG.warn("Failed invocation for " + p.toString(), e);
    }
    return false;
  }
}
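
// A hedged usage sketch (hypothetical WAL path; assumes an HBase/HDFS classpath):
//   Configuration conf = HBaseConfiguration.create();
//   FSHDFSUtils utils = new FSHDFSUtils();
//   utils.recoverFileLease(FileSystem.get(conf), new Path("/hbase/.logs/example"), conf);
// recoverFileLease is a no-op unless fs is a DistributedFileSystem and append is enabled.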