
/**
 *
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.master.handler;

import java.io.IOException;
import java.io.InterruptedIOException;
import java.util.ArrayList;
import java.util.List;
import java.util.Set;
import java.util.concurrent.locks.Lock;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.hbase.classification.InterfaceAudience;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.Server;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.client.RegionReplicaUtil;
import org.apache.hadoop.hbase.client.TableState;
import org.apache.hadoop.hbase.executor.EventHandler;
import org.apache.hadoop.hbase.executor.EventType;
import org.apache.hadoop.hbase.master.AssignmentManager;
import org.apache.hadoop.hbase.master.DeadServer;
import org.apache.hadoop.hbase.master.MasterFileSystem;
import org.apache.hadoop.hbase.master.MasterServices;
import org.apache.hadoop.hbase.master.RegionState;
import org.apache.hadoop.hbase.master.RegionStates;
import org.apache.hadoop.hbase.master.ServerManager;
import org.apache.hadoop.hbase.master.balancer.BaseLoadBalancer;
import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitLogTask.RecoveryMode;

/**
 * Process server shutdown.
 * The server to handle must already be in the deadservers list.  See
 * {@link ServerManager#expireServer(ServerName)}.
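 * <p>
 * A minimal usage sketch (simplified; in practice the handler is submitted by the master
 * itself when a region server expires, and {@code server} and {@code services} are usually
 * the same HMaster instance):
 * <pre>
 *   // assumes 'deadServers' already contains 'serverName', e.g. via DeadServer#add
 *   services.getExecutorService().submit(
 *     new ServerShutdownHandler(server, services, deadServers, serverName, true));
 * </pre>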
 */
@InterfaceAudience.Private
public class ServerShutdownHandler extends EventHandler {
  private static final Log LOG = LogFactory.getLog(ServerShutdownHandler.class);
  protected final ServerName serverName;
  protected final MasterServices services;
  protected final DeadServer deadServers;
  protected final boolean shouldSplitWal; // whether to split WAL or not
  protected final int regionAssignmentWaitTimeout;

  public ServerShutdownHandler(final Server server, final MasterServices services,
      final DeadServer deadServers, final ServerName serverName,
      final boolean shouldSplitWal) {
    this(server, services, deadServers, serverName, EventType.M_SERVER_SHUTDOWN,
        shouldSplitWal);
  }

  ServerShutdownHandler(final Server server, final MasterServices services,
      final DeadServer deadServers, final ServerName serverName, EventType type,
      final boolean shouldSplitWal) {
    super(server, type);
    this.serverName = serverName;
    this.server = server;
    this.services = services;
    this.deadServers = deadServers;
    if (!this.deadServers.isDeadServer(this.serverName)) {
      LOG.warn(this.serverName + " is NOT in deadservers; it should be!");
    }
    this.shouldSplitWal = shouldSplitWal;
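    // Milliseconds (default 15000) that process() later waits for each reassigned region to
    // clear the regions-in-transition state before distributed log replay is kicked off.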
    this.regionAssignmentWaitTimeout = server.getConfiguration().getInt(
      HConstants.LOG_REPLAY_WAIT_REGION_TIMEOUT, 15000);
  }

  @Override
  public String getInformativeName() {
    if (serverName != null) {
      return this.getClass().getSimpleName() + " for " + serverName;
    } else {
      return super.getInformativeName();
    }
  }

  /**
   * @return True if the server we are processing was carrying <code>hbase:meta</code>
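   * (MetaServerShutdownHandler is expected to override this and return true)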
   */
  boolean isCarryingMeta() {
    return false;
  }

  @Override
  public String toString() {
    return getClass().getSimpleName() + "-" + serverName + "-" + getSeqid();
  }

  @Override
  public void process() throws IOException {
    boolean hasLogReplayWork = false;
    final ServerName serverName = this.serverName;
    try {

      // We don't want a worker thread in the MetaServerShutdownHandler
      // executor pool to block while waiting for hbase:meta to become available.
      // Otherwise, it could run into the following issue:
      // 1. The current MetaServerShutdownHandler instance for RS1 waits for hbase:meta
      //    to come online.
      // 2. The newly assigned hbase:meta region server RS2 is shut down right after
      //    it opens the hbase:meta region, so the MetaServerShutdownHandler
      //    instance for RS1 is still blocked.
      // 3. A new instance of MetaServerShutdownHandler for RS2 is queued.
      // 4. The newly assigned hbase:meta region server RS3 is shut down right after
      //    it opens the hbase:meta region, so the MetaServerShutdownHandler
      //    instances for RS1 and RS2 are still blocked.
      // 5. A new instance of MetaServerShutdownHandler for RS3 is queued.
      // 6. Repeat until we run out of MetaServerShutdownHandler worker threads.
      // The solution is to resubmit a ServerShutdownHandler request to process
      // the user regions on that server so that the MetaServerShutdownHandler
      // executor pool always stays available.
      //
      // If the AssignmentManager hasn't finished rebuilding user regions,
      // we are not ready to assign dead regions either, so we re-queue
      // the dead server for further processing too.
      AssignmentManager am = services.getAssignmentManager();
      ServerManager serverManager = services.getServerManager();
      if (isCarryingMeta() /* hbase:meta */ || !am.isFailoverCleanupDone()) {
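        // processDeadServer will either re-queue this server until failover cleanup is done,
        // or re-submit it as a plain ServerShutdownHandler, so this worker can return now.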
        serverManager.processDeadServer(serverName, this.shouldSplitWal);
        return;
      }

      // Wait on meta to come online; we need it to progress.
      // TODO: Best way to hold strictly here?  We should build this retry logic
      // into the MetaTableAccessor operations themselves.
      // TODO: Is the reading of hbase:meta necessary when the Master has the state of the
      // cluster in its head?  It should be possible to do without reading hbase:meta
      // in all but one case. On split, the RS updates the hbase:meta
      // table and THEN informs the master of the split via zk nodes in the
      // 'unassigned' dir.  Currently the RS puts ephemeral nodes into zk so if
      // the regionserver dies, these nodes do not stick around and this server
      // shutdown processing does fixup (see the fixupDaughters method below).
      // If we wanted to skip the hbase:meta scan, we'd have to change at least the
      // final SPLIT message to be permanent in zk so that in here we'd know a SPLIT
      // completed (zk is updated after edits to hbase:meta have gone in).  See
      // {@link SplitTransaction}.  We'd also have to figure out another way of
      // doing the hbase:meta daughters fixup below.
      Set<HRegionInfo> hris = null;
      try {
        server.getMetaTableLocator().waitMetaRegionLocation(server.getZooKeeper());
        if (BaseLoadBalancer.tablesOnMaster(server.getConfiguration())) {
          while (!this.server.isStopped() && serverManager.countOfRegionServers() < 2) {
            // Wait until at least one other regionserver is up besides the active master
            // so that we don't assign all regions to the active master.
            // This is best effort, because a newly joined regionserver
            // could crash right after that.
            Thread.sleep(100);
          }
        }
        hris = am.getRegionStates().getServerRegions(serverName);
      } catch (InterruptedException e) {
        Thread.currentThread().interrupt();
        throw (InterruptedIOException)new InterruptedIOException().initCause(e);
      }
      if (this.server.isStopped()) {
        throw new IOException("Server is stopped");
      }

      // Delay setting the recovery mode from configuration until all outstanding
      // split log tasks have drained.
      this.services.getMasterFileSystem().setLogRecoveryMode();
      boolean distributedLogReplay =
        (this.services.getMasterFileSystem().getLogRecoveryMode() == RecoveryMode.LOG_REPLAY);
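      // In LOG_REPLAY mode the regions below are first marked as recovering and their WAL
      // edits are replayed after reassignment; otherwise the WALs are split into
      // recovered.edits files before the regions are assigned.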

      try {
        if (this.shouldSplitWal) {
          if (distributedLogReplay) {
            LOG.info("Mark regions in recovery for crashed server " + serverName +
              " before assignment; regions=" + hris);
            MasterFileSystem mfs = this.services.getMasterFileSystem();
            mfs.prepareLogReplay(serverName, hris);
          } else {
            LOG.info("Splitting logs for " + serverName +
              " before assignment; region count=" + (hris == null ? 0 : hris.size()));
            this.services.getMasterFileSystem().splitLog(serverName);
          }
          am.getRegionStates().logSplit(serverName);
        } else {
          LOG.info("Skipping log splitting for " + serverName);
        }
      } catch (IOException ioe) {
        resubmit(serverName, ioe);
      }
      List<HRegionInfo> toAssignRegions = new ArrayList<HRegionInfo>();
      int replicaCount = services.getConfiguration().getInt(HConstants.META_REPLICAS_NUM,
          HConstants.DEFAULT_META_REPLICA_NUM);
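      // Only secondary hbase:meta replicas (replica id >= 1) are handled here; the primary
      // hbase:meta region is handled by the MetaServerShutdownHandler subclass.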
      for (int i = 1; i < replicaCount; i++) {
        HRegionInfo metaHri =
            RegionReplicaUtil.getRegionInfoForReplica(HRegionInfo.FIRST_META_REGIONINFO, i);
        if (am.isCarryingMetaReplica(serverName, metaHri)) {
          LOG.info("Reassigning meta replica " + metaHri + " that was on " + serverName);
          toAssignRegions.add(metaHri);
        }
      }
      // Clean out anything in regions in transition.  Being conservative and
      // doing it after log splitting.  Could do some states before -- OPENING?
      // OFFLINE? -- and then others after, like CLOSING, that depend on log
      // splitting.
      List<HRegionInfo> regionsInTransition = am.processServerShutdown(serverName);
      LOG.info("Reassigning " + ((hris == null)? 0: hris.size()) +
        " region(s) that " + (serverName == null? "null": serverName) +
        " was carrying (and " + regionsInTransition.size() +
        " region(s) that were opening on this server)");

      toAssignRegions.addAll(regionsInTransition);

      // Iterate regions that were on this server and assign them
      if (hris != null && !hris.isEmpty()) {
        RegionStates regionStates = am.getRegionStates();
        for (HRegionInfo hri: hris) {
          if (regionsInTransition.contains(hri)) {
            continue;
          }
          String encodedName = hri.getEncodedName();
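          // Take the per-region lock so the checks and state updates below don't race with
          // region transitions being handled concurrently by the AssignmentManager.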
          Lock lock = am.acquireRegionLock(encodedName);
          try {
            RegionState rit = regionStates.getRegionTransitionState(hri);
            if (processDeadRegion(hri, am)) {
              ServerName addressFromAM = regionStates.getRegionServerOfRegion(hri);
              if (addressFromAM != null && !addressFromAM.equals(this.serverName)) {
                // If this region is in transition on the dead server, it must be
                // opening or pending_open, which should have been covered by AM#processServerShutdown
                LOG.info("Skip assigning region " + hri.getRegionNameAsString()
                  + " because it has been opened on " + addressFromAM.getServerName());
                continue;
              }
              if (rit != null) {
                if (rit.getServerName() != null && !rit.isOnServer(serverName)) {
                  // Skip regions that are in transition on another server
                  LOG.info("Skip assigning region in transition on another server: " + rit);
                  continue;
                }
                LOG.info("Reassigning region with rs = " + rit);
                regionStates.updateRegionState(hri, RegionState.State.OFFLINE);
              } else if (regionStates.isRegionInState(
                  hri, RegionState.State.SPLITTING_NEW, RegionState.State.MERGING_NEW)) {
                regionStates.updateRegionState(hri, RegionState.State.OFFLINE);
              }
              toAssignRegions.add(hri);
            } else if (rit != null) {
              if ((rit.isClosing() || rit.isFailedClose() || rit.isOffline())
                  && am.getTableStateManager().isTableState(hri.getTable(),
                  TableState.State.DISABLED, TableState.State.DISABLING) ||
                  am.getReplicasToClose().contains(hri)) {
                // If the table was partially disabled and the RS went down, we should clear the RIT
                // and remove the node for the region.
                // The rit we use may be stale if the table was in DISABLING state; even though
                // we did assign, we will not clear the znode while it is in CLOSING state.
                // Doing this does no harm. See HBASE-5927.
                regionStates.updateRegionState(hri, RegionState.State.OFFLINE);
                am.offlineDisabledRegion(hri);
              } else {
                LOG.warn("THIS SHOULD NOT HAPPEN: unexpected region in transition "
                  + rit + " not to be assigned by SSH of server " + serverName);
              }
            }
          } finally {
            lock.unlock();
          }
        }
      }

      try {
        am.assign(toAssignRegions);
      } catch (InterruptedException ie) {
        LOG.error("Caught " + ie + " during round-robin assignment");
        throw (InterruptedIOException)new InterruptedIOException().initCause(ie);
      } catch (IOException ioe) {
        LOG.info("Caught " + ioe + " during region assignment, will retry");
        // Only do wal splitting if shouldSplitWal and in DLR mode
        serverManager.processDeadServer(serverName,
          this.shouldSplitWal && distributedLogReplay);
        return;
      }

      if (this.shouldSplitWal && distributedLogReplay) {
        // Wait for region assignment to complete
        for (HRegionInfo hri : toAssignRegions) {
          try {
            if (!am.waitOnRegionToClearRegionsInTransition(hri, regionAssignmentWaitTimeout)) {
              // We wait here so that log replay does not hit the still-dead server and incur
              // an RPC timeout when replay happens before region assignment completes.
              LOG.warn("Region " + hri.getEncodedName()
                  + " didn't complete assignment in time");
            }
          } catch (InterruptedException ie) {
            throw new InterruptedIOException("Caught " + ie
                + " during waitOnRegionToClearRegionsInTransition");
          }
        }
        // submit logReplay work
        this.services.getExecutorService().submit(
          new LogReplayHandler(this.server, this.services, this.deadServers, this.serverName));
        hasLogReplayWork = true;
      }
    } finally {
      this.deadServers.finish(serverName);
    }

    if (!hasLogReplayWork) {
      LOG.info("Finished processing of shutdown of " + serverName);
    }
  }

  private void resubmit(final ServerName serverName, IOException ex) throws IOException {
    // Typecast to SSH so that we make sure it is the SSH instance that
    // gets submitted, as opposed to MSSH or some other derived instance of SSH.
    this.services.getExecutorService().submit((ServerShutdownHandler) this);
    this.deadServers.add(serverName);
    throw new IOException("failed log splitting for " + serverName + ", will retry", ex);
  }

  /**
   * Process a dead region from a dead RS. Checks whether the region belongs to a disabled
   * or disabling table, or has a partially completed split.
   * @param hri the region that was on the dead server
   * @param assignmentManager the master's assignment manager
   * @return True if the specified region should be assigned, false if not.
   * @throws IOException
   */
  public static boolean processDeadRegion(HRegionInfo hri,
      AssignmentManager assignmentManager)
  throws IOException {
    boolean tablePresent = assignmentManager.getTableStateManager().isTablePresent(hri.getTable());
    if (!tablePresent) {
      LOG.info("The table " + hri.getTable()
          + " was deleted.  Hence not proceeding.");
      return false;
    }
    // If the table has been disabled, there is no need to assign the region.
    boolean disabled = assignmentManager.getTableStateManager().isTableState(hri.getTable(),
      TableState.State.DISABLED);
    if (disabled) {
      LOG.info("The table " + hri.getTable()
          + " was disabled.  Hence not proceeding.");
      return false;
    }
    if (hri.isOffline() && hri.isSplit()) {
      // HBASE-7721: Split parent and daughters are inserted into hbase:meta as an atomic
      // operation. If the meta scanner saw the parent split, then it should see the daughters
      // as assigned to the dead server. We don't have to do anything.
      return false;
    }
    boolean disabling = assignmentManager.getTableStateManager().isTableState(hri.getTable(),
      TableState.State.DISABLING);
    if (disabling) {
      LOG.info("The table " + hri.getTable()
          + " is being disabled.  Hence not assigning region " + hri.getEncodedName());
      return false;
    }
    return true;
  }
}