/**
 *
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.master.handler;

import java.io.IOException;
import java.io.InterruptedIOException;
import java.util.ArrayList;
import java.util.List;
import java.util.Set;
import java.util.concurrent.locks.Lock;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.Server;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.executor.EventHandler;
import org.apache.hadoop.hbase.executor.EventType;
import org.apache.hadoop.hbase.master.AssignmentManager;
import org.apache.hadoop.hbase.master.DeadServer;
import org.apache.hadoop.hbase.master.MasterFileSystem;
import org.apache.hadoop.hbase.master.MasterServices;
import org.apache.hadoop.hbase.master.RegionState;
import org.apache.hadoop.hbase.master.RegionState.State;
import org.apache.hadoop.hbase.master.RegionStates;
import org.apache.hadoop.hbase.master.ServerManager;
import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos;
import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitLogTask.RecoveryMode;

/**
 * Process server shutdown.
 * The server to handle must already be in the dead servers list.  See
 * {@link ServerManager#expireServer(ServerName)}.
 */
@InterfaceAudience.Private
public class ServerShutdownHandler extends EventHandler {
  private static final Log LOG = LogFactory.getLog(ServerShutdownHandler.class);
  protected final ServerName serverName;
  protected final MasterServices services;
  protected final DeadServer deadServers;
  protected final boolean shouldSplitHlog; // whether to split HLog or not
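  // How long (in ms) to wait for a region to leave the regions-in-transition set before
  // log replay proceeds; see process().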
  protected final int regionAssignmentWaitTimeout;

  public ServerShutdownHandler(final Server server, final MasterServices services,
      final DeadServer deadServers, final ServerName serverName,
      final boolean shouldSplitHlog) {
    this(server, services, deadServers, serverName, EventType.M_SERVER_SHUTDOWN,
        shouldSplitHlog);
  }

  ServerShutdownHandler(final Server server, final MasterServices services,
      final DeadServer deadServers, final ServerName serverName, EventType type,
      final boolean shouldSplitHlog) {
    super(server, type);
    this.serverName = serverName;
    this.server = server;
    this.services = services;
    this.deadServers = deadServers;
    if (!this.deadServers.isDeadServer(this.serverName)) {
      LOG.warn(this.serverName + " is NOT in deadservers; it should be!");
    }
    this.shouldSplitHlog = shouldSplitHlog;
    this.regionAssignmentWaitTimeout = server.getConfiguration().getInt(
      HConstants.LOG_REPLAY_WAIT_REGION_TIMEOUT, 15000);
  }

  @Override
  public String getInformativeName() {
    if (serverName != null) {
      return this.getClass().getSimpleName() + " for " + serverName;
    } else {
      return super.getInformativeName();
    }
  }

  /**
   * @return True if the server we are processing was carrying <code>hbase:meta</code>
   */
  boolean isCarryingMeta() {
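    // Subclasses (e.g. MetaServerShutdownHandler) override this to report whether the
    // dead server was carrying hbase:meta; the base handler only handles user regions.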
    return false;
  }

  @Override
  public String toString() {
    return getClass().getSimpleName() + "-" + serverName + "-" + getSeqid();
  }

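  /**
   * Handles the shutdown of the dead region server: waits for hbase:meta to come online, splits
   * the server's logs (or marks its regions for recovery under distributed log replay), clears
   * regions in transition, and reassigns the regions the server was carrying. If the server was
   * carrying hbase:meta or failover cleanup has not finished, the work is requeued instead.
   */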
  @Override
  public void process() throws IOException {
    boolean hasLogReplayWork = false;
    final ServerName serverName = this.serverName;
    try {

      // We don't want a worker thread in the MetaServerShutdownHandler
      // executor pool to block by waiting on the availability of hbase:meta.
      // Otherwise, it could run into the following issue:
      // 1. The current MetaServerShutdownHandler instance for RS1 waits for hbase:meta
      //    to come online.
      // 2. The newly assigned hbase:meta region server RS2 is shut down right after
      //    it opens the hbase:meta region. So the MetaServerShutdownHandler
      //    instance for RS1 will still be blocked.
      // 3. A new instance of MetaServerShutdownHandler for RS2 is queued.
      // 4. The newly assigned hbase:meta region server RS3 is shut down right after
      //    it opens the hbase:meta region. So the MetaServerShutdownHandler
      //    instances for RS1 and RS2 will still be blocked.
      // 5. A new instance of MetaServerShutdownHandler for RS3 is queued.
      // 6. Repeat until we run out of MetaServerShutdownHandler worker threads.
      // The solution here is to resubmit a ServerShutdownHandler request to process
      // the user regions on that server so that the MetaServerShutdownHandler
      // executor pool is always available.
      //
      // If the AssignmentManager hasn't finished rebuilding user regions,
      // we are not ready to assign the dead regions either. So we re-queue
      // the dead server for further processing too.
      AssignmentManager am = services.getAssignmentManager();
      if (isCarryingMeta() // hbase:meta
          || !am.isFailoverCleanupDone()) {
        this.services.getServerManager().processDeadServer(serverName, this.shouldSplitHlog);
        return;
      }

      // Wait on meta to come online; we need it to progress.
      // TODO: Best way to hold strictly here?  We should build this retry logic
      // into the MetaTableAccessor operations themselves.
      // TODO: Is the reading of hbase:meta necessary when the Master has state of
      // cluster in its head?  It should be possible to do without reading hbase:meta
      // in all but one case. On split, the RS updates the hbase:meta
      // table and THEN informs the master of the split via zk nodes in
      // the 'unassigned' dir.  Currently the RS puts ephemeral nodes into zk so if
      // the regionserver dies, these nodes do not stick around and this server
      // shutdown processing does fixup (see the fixupDaughters method below).
      // If we wanted to skip the hbase:meta scan, we'd have to change at least the
      // final SPLIT message to be permanent in zk so in here we'd know a SPLIT
      // completed (zk is updated after edits to hbase:meta have gone in).  See
      // {@link SplitTransaction}.  We'd also have to figure out another way of
      // doing the hbase:meta daughters fixup below.
      Set<HRegionInfo> hris = null;
      while (!this.server.isStopped()) {
        try {
          server.getMetaTableLocator().waitMetaRegionLocation(server.getZooKeeper());
          // Skip getting user regions if the server is stopped.
          if (!this.server.isStopped()) {
            hris = am.getRegionStates().getServerRegions(serverName);
            if (hris != null) {
              hris.remove(HRegionInfo.FIRST_META_REGIONINFO);
            }
          }
          break;
        } catch (InterruptedException e) {
          Thread.currentThread().interrupt();
          throw (InterruptedIOException)new InterruptedIOException().initCause(e);
        }
      }
      if (this.server.isStopped()) {
        throw new IOException("Server is stopped");
      }

      // Setting the recovery mode from configuration is delayed until all outstanding
      // split log tasks have drained.
      this.services.getMasterFileSystem().setLogRecoveryMode();
      boolean distributedLogReplay =
        (this.services.getMasterFileSystem().getLogRecoveryMode() == RecoveryMode.LOG_REPLAY);
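      // Under distributed log replay the dead server's regions are marked recovering and
      // reassigned first; their WAL edits are replayed afterwards (see the LogReplayHandler
      // submitted below). Otherwise the logs are split before any region is assigned.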

      try {
        if (this.shouldSplitHlog) {
          LOG.info("Splitting logs for " + serverName + " before assignment.");
          if (distributedLogReplay) {
            LOG.info("Mark regions in recovery before assignment.");
            MasterFileSystem mfs = this.services.getMasterFileSystem();
            mfs.prepareLogReplay(serverName, hris);
          } else {
            this.services.getMasterFileSystem().splitLog(serverName);
          }
          am.getRegionStates().logSplit(serverName);
        } else {
          LOG.info("Skipping log splitting for " + serverName);
        }
      } catch (IOException ioe) {
        resubmit(serverName, ioe);
      }

      // Clean out anything in regions in transition.  Being conservative and
      // doing it after log splitting.  Some states could be handled before -- OPENING?
      // OFFLINE? -- and others that depend on log splitting, like CLOSING, after.
      List<HRegionInfo> regionsInTransition = am.processServerShutdown(serverName);
      LOG.info("Reassigning " + ((hris == null)? 0: hris.size()) +
        " region(s) that " + (serverName == null? "null": serverName) +
        " was carrying (and " + regionsInTransition.size() +
        " region(s) that were opening on this server)");

      List<HRegionInfo> toAssignRegions = new ArrayList<HRegionInfo>();
      toAssignRegions.addAll(regionsInTransition);

      // Iterate regions that were on this server and assign them
      if (hris != null && !hris.isEmpty()) {
        RegionStates regionStates = am.getRegionStates();
        for (HRegionInfo hri: hris) {
          if (regionsInTransition.contains(hri)) {
            continue;
          }
          String encodedName = hri.getEncodedName();
          Lock lock = am.acquireRegionLock(encodedName);
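          // Take the per-region lock so this cleanup does not race with concurrent
          // assignment operations on the same region.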
          try {
            RegionState rit = regionStates.getRegionTransitionState(hri);
            if (processDeadRegion(hri, am)) {
              ServerName addressFromAM = regionStates.getRegionServerOfRegion(hri);
              if (addressFromAM != null && !addressFromAM.equals(this.serverName)) {
                // If this region is in transition on the dead server, it must be
                // opening or pending_open, which should have been covered by AM#processServerShutdown
                LOG.info("Skip assigning region " + hri.getRegionNameAsString()
                  + " because it has been opened in " + addressFromAM.getServerName());
                continue;
              }
              if (rit != null) {
                if (rit.getServerName() != null && !rit.isOnServer(serverName)) {
                  // Skip regions that are in transition on another server
                  LOG.info("Skip assigning region in transition on other server " + rit);
                  continue;
                }
                LOG.info("Reassigning region with rs = " + rit);
                regionStates.updateRegionState(hri, State.OFFLINE);
              } else if (regionStates.isRegionInState(
                  hri, State.SPLITTING_NEW, State.MERGING_NEW)) {
                regionStates.updateRegionState(hri, State.OFFLINE);
              }
              toAssignRegions.add(hri);
            } else if (rit != null) {
              if (rit.isPendingCloseOrClosing()
                  && am.getTableStateManager().isTableState(hri.getTable(),
                  ZooKeeperProtos.Table.State.DISABLED, ZooKeeperProtos.Table.State.DISABLING) ||
                  am.getReplicasToClose().contains(hri)) {
                // If the table was partially disabled and the RS went down, we should clear the RIT
                // and remove the node for the region.
                // The rit we use may be stale if the table was in DISABLING state: even though we
                // did assign, we will not be clearing the znode in CLOSING state.
                // Doing this does no harm. See HBASE-5927
                regionStates.updateRegionState(hri, State.OFFLINE);
                am.offlineDisabledRegion(hri);
              } else {
                LOG.warn("THIS SHOULD NOT HAPPEN: unexpected region in transition "
                  + rit + " not to be assigned by SSH of server " + serverName);
              }
            }
          } finally {
            lock.unlock();
          }
        }
      }

      try {
        am.assign(toAssignRegions);
      } catch (InterruptedException ie) {
        LOG.error("Caught " + ie + " during round-robin assignment");
        throw (InterruptedIOException)new InterruptedIOException().initCause(ie);
      }

      if (this.shouldSplitHlog && distributedLogReplay) {
        // wait for region assignment to complete
        for (HRegionInfo hri : toAssignRegions) {
          try {
            if (!am.waitOnRegionToClearRegionsInTransition(hri, regionAssignmentWaitTimeout)) {
              // The wait is to avoid log replay hitting the current dead server and incurring
              // an RPC timeout when replay happens before region assignment completes.
              LOG.warn("Region " + hri.getEncodedName()
                  + " didn't complete assignment in time");
            }
          } catch (InterruptedException ie) {
            throw new InterruptedIOException("Caught " + ie
                + " during waitOnRegionToClearRegionsInTransition");
          }
        }
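        // The LogReplayHandler picks up the actual replay of this server's WAL edits
        // now that its regions are back online.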
        // submit logReplay work
        this.services.getExecutorService().submit(
          new LogReplayHandler(this.server, this.services, this.deadServers, this.serverName));
        hasLogReplayWork = true;
      }
    } finally {
      this.deadServers.finish(serverName);
    }

    if (!hasLogReplayWork) {
      LOG.info("Finished processing of shutdown of " + serverName);
    }
  }

  private void resubmit(final ServerName serverName, IOException ex) throws IOException {
    // typecast to SSH so that we make sure that it is the SSH instance that
    // gets submitted as opposed to MSSH or some other derived instance of SSH
    this.services.getExecutorService().submit((ServerShutdownHandler) this);
    this.deadServers.add(serverName);
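    // Re-add the server so it stays tracked as a dead server in processing while the
    // resubmitted handler waits to run.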
    throw new IOException("failed log splitting for " + serverName + ", will retry", ex);
  }

  /**
   * Process a dead region from a dead RS. Checks if the region is disabled or
   * disabling or if the region has a partially completed split.
   * @param hri the region that was on the dead region server
   * @param assignmentManager the master's assignment manager
   * @return true if the specified region should be assigned, false if not
   * @throws IOException
   */
  public static boolean processDeadRegion(HRegionInfo hri,
      AssignmentManager assignmentManager)
  throws IOException {
    boolean tablePresent = assignmentManager.getTableStateManager().isTablePresent(hri.getTable());
    if (!tablePresent) {
      LOG.info("The table " + hri.getTable()
          + " was deleted.  Hence not proceeding.");
      return false;
    }
    // If table is not disabled but the region is offlined,
    boolean disabled = assignmentManager.getTableStateManager().isTableState(hri.getTable(),
      ZooKeeperProtos.Table.State.DISABLED);
    if (disabled) {
      LOG.info("The table " + hri.getTable()
          + " was disabled.  Hence not proceeding.");
      return false;
    }
    if (hri.isOffline() && hri.isSplit()) {
      //HBASE-7721: Split parent and daughters are inserted into hbase:meta as an atomic operation.
      //If the meta scanner saw the parent split, then it should see the daughters as assigned
      //to the dead server. We don't have to do anything.
      return false;
    }
    boolean disabling = assignmentManager.getTableStateManager().isTableState(hri.getTable(),
      ZooKeeperProtos.Table.State.DISABLING);
    if (disabling) {
      LOG.info("The table " + hri.getTable()
          + " is being disabled.  Hence not assigning region " + hri.getEncodedName());
      return false;
    }
    return true;
  }
}