/**
 *
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.master.handler;

import java.io.IOException;
import java.io.InterruptedIOException;
import java.util.ArrayList;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.NavigableMap;
import java.util.Set;
import java.util.concurrent.locks.Lock;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.Server;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.catalog.CatalogTracker;
import org.apache.hadoop.hbase.catalog.MetaReader;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.executor.EventHandler;
import org.apache.hadoop.hbase.executor.EventType;
import org.apache.hadoop.hbase.master.AssignmentManager;
import org.apache.hadoop.hbase.master.DeadServer;
import org.apache.hadoop.hbase.master.MasterServices;
import org.apache.hadoop.hbase.master.RegionState;
import org.apache.hadoop.hbase.master.RegionState.State;
import org.apache.hadoop.hbase.master.RegionStates;
import org.apache.hadoop.hbase.master.ServerManager;
import org.apache.hadoop.hbase.regionserver.wal.HLogSplitter;
import org.apache.hadoop.hbase.zookeeper.ZKAssign;
import org.apache.zookeeper.KeeperException;

/**
 * Process server shutdown.
 * The server to handle must already be in the dead servers list.  See
 * {@link ServerManager#expireServer(ServerName)}.
 */
@InterfaceAudience.Private
public class ServerShutdownHandler extends EventHandler {
  private static final Log LOG = LogFactory.getLog(ServerShutdownHandler.class);
  protected final ServerName serverName;
  protected final MasterServices services;
  protected final DeadServer deadServers;
  protected final boolean shouldSplitHlog; // whether to split the HLog or not
  protected final boolean distributedLogReplay; // whether distributed log replay is enabled
  protected final int regionAssignmentWaitTimeout; // ms to wait for a recovering region to leave RIT

  public ServerShutdownHandler(final Server server, final MasterServices services,
      final DeadServer deadServers, final ServerName serverName,
      final boolean shouldSplitHlog) {
    this(server, services, deadServers, serverName, EventType.M_SERVER_SHUTDOWN,
        shouldSplitHlog);
  }

  ServerShutdownHandler(final Server server, final MasterServices services,
      final DeadServer deadServers, final ServerName serverName, EventType type,
      final boolean shouldSplitHlog) {
    super(server, type);
    this.serverName = serverName;
    this.server = server;
    this.services = services;
    this.deadServers = deadServers;
    if (!this.deadServers.isDeadServer(this.serverName)) {
      LOG.warn(this.serverName + " is NOT in deadservers; it should be!");
    }
    this.shouldSplitHlog = shouldSplitHlog;
    this.distributedLogReplay = HLogSplitter.isDistributedLogReplay(server.getConfiguration());
    this.regionAssignmentWaitTimeout = server.getConfiguration().getInt(
      HConstants.LOG_REPLAY_WAIT_REGION_TIMEOUT, 15000);
  }

  @Override
  public String getInformativeName() {
    if (serverName != null) {
      return this.getClass().getSimpleName() + " for " + serverName;
    } else {
      return super.getInformativeName();
    }
  }

  /**
   * Always false here; handlers for servers that were carrying <code>hbase:meta</code>
   * (such as MetaServerShutdownHandler) override this.
   * @return True if the server we are processing was carrying <code>hbase:meta</code>
   */
  boolean isCarryingMeta() {
    return false;
  }

  @Override
  public String toString() {
    String name = "UnknownServerName";
    if (server != null && server.getServerName() != null) {
      name = server.getServerName().toString();
    }
    return getClass().getSimpleName() + "-" + name + "-" + getSeqid();
  }

  @Override
  public void process() throws IOException {
    boolean hasLogReplayWork = false;
    final ServerName serverName = this.serverName;
    try {

      // We don't want a worker thread in the MetaServerShutdownHandler
      // executor pool to block waiting on the availability of hbase:meta.
      // Otherwise, it could run into the following issue:
      // 1. The current MetaServerShutdownHandler instance for RS1 waits for hbase:meta
      //    to come online.
      // 2. The newly assigned hbase:meta region server RS2 is shut down right after
      //    it opens the hbase:meta region.  So the MetaServerShutdownHandler
      //    instance for RS1 is still blocked.
      // 3. A new instance of MetaServerShutdownHandler for RS2 is queued.
      // 4. The newly assigned hbase:meta region server RS3 is shut down right after
      //    it opens the hbase:meta region.  So the MetaServerShutdownHandler
      //    instances for RS1 and RS2 are still blocked.
      // 5. A new instance of MetaServerShutdownHandler for RS3 is queued.
      // 6. Repeat until we run out of MetaServerShutdownHandler worker threads.
      // The solution here is to resubmit a ServerShutdownHandler request to process
      // the user regions on that server, so that the MetaServerShutdownHandler
      // executor pool is always available.
      //
      // If the AssignmentManager hasn't finished rebuilding user regions,
      // we are not ready to assign dead regions either.  In that case we re-queue
      // the dead server for further processing too.
      AssignmentManager am = services.getAssignmentManager();
      if (isCarryingMeta() // hbase:meta
          || !am.isFailoverCleanupDone()) {
        this.services.getServerManager().processDeadServer(serverName, this.shouldSplitHlog);
        return;
      }
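
      // At this point we know the dead server was not carrying hbase:meta and the
      // master's failover cleanup has completed, so we can go ahead with log
      // recovery and reassignment of this server's user regions.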

      // Wait on meta to come online; we need it to progress.
      // TODO: Best way to hold strictly here?  We should build this retry logic
      // into the MetaReader operations themselves.
      // TODO: Is the reading of hbase:meta necessary when the Master has the state of the
      // cluster in its head?  It should be possible to do without reading hbase:meta
      // in all but one case.  On split, the RS updates the hbase:meta
      // table and THEN informs the master of the split via zk nodes in
      // the 'unassigned' dir.  Currently the RS puts ephemeral nodes into zk, so if
      // the regionserver dies, these nodes do not stick around and this server
      // shutdown processing does the fixup (see the fixupDaughters method below).
      // If we wanted to skip the hbase:meta scan, we'd have to change at least the
      // final SPLIT message to be permanent in zk so that in here we'd know a SPLIT
      // completed (zk is updated after edits to hbase:meta have gone in).  See
      // {@link SplitTransaction}.  We'd also have to figure out another way of
      // doing the hbase:meta daughters fixup below.
      NavigableMap<HRegionInfo, Result> hris = null;
      while (!this.server.isStopped()) {
        try {
          this.server.getCatalogTracker().waitForMeta();
          // Skip getting user regions if the server is stopped.
          if (!this.server.isStopped()) {
            hris = MetaReader.getServerUserRegions(this.server.getCatalogTracker(),
                this.serverName);
          }
          break;
        } catch (InterruptedException e) {
          Thread.currentThread().interrupt();
          throw (InterruptedIOException)new InterruptedIOException().initCause(e);
        } catch (IOException ioe) {
          LOG.info("Received exception accessing hbase:meta during server shutdown of " +
            serverName + ", retrying hbase:meta read", ioe);
        }
      }
      if (this.server.isStopped()) {
        throw new IOException("Server is stopped");
      }

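      // WAL recovery for the dead server.  Two modes are possible:
      // - distributed log replay (HLogSplitter.isDistributedLogReplay(conf) is true):
      //   the regions are only marked as recovering here; the actual replay is
      //   scheduled further below, after the regions have been reassigned.
      // - classic log splitting: the dead server's logs are split up front, before
      //   any region is reassigned.
      // If this step fails, resubmit() re-queues this handler and rethrows, so we
      // never reach the assignment phase with unrecovered WALs.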
      try {
        if (this.shouldSplitHlog) {
          LOG.info("Splitting logs for " + serverName + " before assignment.");
          if (this.distributedLogReplay) {
            LOG.info("Mark regions in recovery before assignment.");
            Set<ServerName> serverNames = new HashSet<ServerName>();
            serverNames.add(serverName);
            this.services.getMasterFileSystem().prepareLogReplay(serverNames);
          } else {
            this.services.getMasterFileSystem().splitLog(serverName);
          }
          am.getRegionStates().logSplit(serverName);
        } else {
          LOG.info("Skipping log splitting for " + serverName);
        }
      } catch (IOException ioe) {
        resubmit(serverName, ioe);
      }

      // Clean out anything in regions in transition.  Being conservative and
      // doing it after log splitting.  Could do some states before -- OPENING?
      // OFFLINE? -- and then others after, like CLOSING, that depend on log
      // splitting.
      List<HRegionInfo> regionsInTransition = am.processServerShutdown(serverName);
      LOG.info("Reassigning " + ((hris == null)? 0: hris.size()) +
        " region(s) that " + (serverName == null? "null": serverName)  +
        " was carrying (and " + regionsInTransition.size() +
        " region(s) that were opening on this server)");

      List<HRegionInfo> toAssignRegions = new ArrayList<HRegionInfo>();
      toAssignRegions.addAll(regionsInTransition);

      // Iterate regions that were on this server and assign them
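      // For each region the dead server was carrying:
      //  - skip it if it is already accounted for as a region in transition
      //    (processServerShutdown above collected those),
      //  - skip it if another live server has since opened it,
      //  - skip it if processDeadRegion() says it should not be assigned
      //    (table deleted/disabled/disabling, or a completed split parent),
      //  - otherwise clear any stale unassigned znode, mark the region OFFLINE
      //    and queue it for reassignment.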
      if (hris != null) {
        RegionStates regionStates = am.getRegionStates();
        for (Map.Entry<HRegionInfo, Result> e: hris.entrySet()) {
          HRegionInfo hri = e.getKey();
          if (regionsInTransition.contains(hri)) {
            continue;
          }
          String encodedName = hri.getEncodedName();
          Lock lock = am.acquireRegionLock(encodedName);
          try {
            RegionState rit = regionStates.getRegionTransitionState(hri);
            if (processDeadRegion(hri, e.getValue(), am, server.getCatalogTracker())) {
              ServerName addressFromAM = regionStates.getRegionServerOfRegion(hri);
              if (addressFromAM != null && !addressFromAM.equals(this.serverName)) {
                // If this region is in transition on the dead server, it must be
                // opening or pending_open, which should have been covered by
                // AM#processServerShutdown
                LOG.info("Skip assigning region " + hri.getRegionNameAsString()
                  + " because it has been opened in " + addressFromAM.getServerName());
                continue;
              }
              if (rit != null) {
                if (rit.getServerName() != null && !rit.isOnServer(serverName)) {
                  // Skip regions that are in transition on another server
                  LOG.info("Skip assigning region in transition on other server " + rit);
                  continue;
                }
                try {
                  // Clean up the zk node
                  LOG.info("Reassigning region with rs = " + rit + " and deleting zk node if exists");
                  ZKAssign.deleteNodeFailSilent(services.getZooKeeper(), hri);
                  regionStates.updateRegionState(hri, State.OFFLINE);
                } catch (KeeperException ke) {
                  this.server.abort("Unexpected ZK exception deleting unassigned node " + hri, ke);
                  return;
                }
              } else if (regionStates.isRegionInState(
                  hri, State.SPLITTING_NEW, State.MERGING_NEW)) {
                regionStates.regionOffline(hri);
              }
              toAssignRegions.add(hri);
            } else if (rit != null) {
              if (rit.isPendingCloseOrClosing()
                  && am.getZKTable().isDisablingOrDisabledTable(hri.getTable())) {
                // If the table was partially disabled and the RS went down, we should clear
                // the RIT and remove the znode for the region.
                // The rit we use may be stale if the table was in DISABLING state, but even
                // though the assign went through, the znode left in CLOSING state would not
                // otherwise be cleared.  Doing this is harmless.  See HBASE-5927.
                regionStates.updateRegionState(hri, State.OFFLINE);
                am.deleteClosingOrClosedNode(hri, rit.getServerName());
                am.offlineDisabledRegion(hri);
              } else {
                LOG.warn("THIS SHOULD NOT HAPPEN: unexpected region in transition "
                  + rit + " not to be assigned by SSH of server " + serverName);
              }
            }
          } finally {
            lock.unlock();
          }
        }
      }

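      // Hand everything we collected to the AssignmentManager in one call so the
      // regions are (round-robin) bulk assigned across the remaining live servers.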
      try {
        am.assign(toAssignRegions);
      } catch (InterruptedException ie) {
        LOG.error("Caught " + ie + " during round-robin assignment");
        throw (InterruptedIOException)new InterruptedIOException().initCause(ie);
      }

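      // Second half of distributed log replay: the replay work can only be scheduled
      // once the recovering regions have been reassigned, so first wait (bounded by
      // regionAssignmentWaitTimeout) for each of them to leave the regions-in-transition
      // set, then submit a LogReplayHandler.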
      if (this.shouldSplitHlog && this.distributedLogReplay) {
        // Wait for the region assignments to complete.
        for (HRegionInfo hri : toAssignRegions) {
          try {
            if (!am.waitOnRegionToClearRegionsInTransition(hri, regionAssignmentWaitTimeout)) {
              // Waiting here avoids log replay hitting the current dead server and
              // incurring an RPC timeout when replay happens before the region
              // assignment completes.
              LOG.warn("Region " + hri.getEncodedName()
                  + " didn't complete assignment in time");
            }
          } catch (InterruptedException ie) {
            throw new InterruptedIOException("Caught " + ie
                + " during waitOnRegionToClearRegionsInTransition");
          }
        }
        // Submit the log replay work.
        this.services.getExecutorService().submit(
          new LogReplayHandler(this.server, this.services, this.deadServers, this.serverName));
        hasLogReplayWork = true;
      }
    } finally {
      this.deadServers.finish(serverName);
    }

    if (!hasLogReplayWork) {
      LOG.info("Finished processing of shutdown of " + serverName);
    }
  }

  private void resubmit(final ServerName serverName, IOException ex) throws IOException {
    // Cast to ServerShutdownHandler so that it is the plain SSH instance that gets
    // resubmitted, as opposed to MSSH or some other derived instance of SSH.
    this.services.getExecutorService().submit((ServerShutdownHandler) this);
    this.deadServers.add(serverName);
    throw new IOException("failed log splitting for " + serverName + ", will retry", ex);
  }

  /**
   * Process a dead region from a dead RS.  Checks if the region is disabled or
   * disabling, or if the region has a partially completed split.
   * @param hri the region to check
   * @param result the hbase:meta row for the region
   * @param assignmentManager the master's assignment manager
   * @param catalogTracker tracker for the catalog (hbase:meta) location
   * @return Returns true if the specified region should be assigned, false if not.
   * @throws IOException
   */
  public static boolean processDeadRegion(HRegionInfo hri, Result result,
      AssignmentManager assignmentManager, CatalogTracker catalogTracker)
  throws IOException {
    boolean tablePresent = assignmentManager.getZKTable().isTablePresent(hri.getTable());
    if (!tablePresent) {
      LOG.info("The table " + hri.getTable()
          + " was deleted.  Hence not proceeding.");
      return false;
    }
    // If the table is disabled, there is no need to assign the region.
    boolean disabled = assignmentManager.getZKTable().isDisabledTable(hri.getTable());
    if (disabled) {
      LOG.info("The table " + hri.getTable()
          + " was disabled.  Hence not proceeding.");
      return false;
    }
    if (hri.isOffline() && hri.isSplit()) {
      // HBASE-7721: Split parent and daughters are inserted into hbase:meta as an atomic
      // operation.  If the meta scanner saw the parent split, then it should see the
      // daughters as assigned to the dead server.  We don't have to do anything.
      return false;
    }
    boolean disabling = assignmentManager.getZKTable().isDisablingTable(hri.getTable());
    if (disabling) {
      LOG.info("The table " + hri.getTable()
          + " is disabled.  Hence not assigning region " + hri.getEncodedName());
      return false;
    }
    return true;
  }
}