/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.hadoop.hbase.master.handler;

import java.io.IOException;
import java.io.InterruptedIOException;
import java.util.ArrayList;
import java.util.List;
import java.util.Set;
import java.util.concurrent.locks.Lock;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.MetaTableAccessor;
import org.apache.hadoop.hbase.Server;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.classification.InterfaceAudience;
import org.apache.hadoop.hbase.client.RegionReplicaUtil;
import org.apache.hadoop.hbase.executor.EventHandler;
import org.apache.hadoop.hbase.executor.EventType;
import org.apache.hadoop.hbase.master.AssignmentManager;
import org.apache.hadoop.hbase.master.DeadServer;
import org.apache.hadoop.hbase.master.MasterFileSystem;
import org.apache.hadoop.hbase.master.MasterServices;
import org.apache.hadoop.hbase.master.RegionState;
import org.apache.hadoop.hbase.master.RegionState.State;
import org.apache.hadoop.hbase.master.RegionStates;
import org.apache.hadoop.hbase.master.ServerManager;
import org.apache.hadoop.hbase.master.balancer.BaseLoadBalancer;
import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos;
import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitLogTask.RecoveryMode;
import org.apache.hadoop.hbase.util.ConfigUtil;
import org.apache.hadoop.hbase.zookeeper.ZKAssign;
import org.apache.zookeeper.KeeperException;

/**
 * Process server shutdown.
 * The server to handle must already be in the dead-servers list; see
 * {@link ServerManager#expireServer(ServerName)}.
 */
@InterfaceAudience.Private
public class ServerShutdownHandler extends EventHandler {
  private static final Log LOG = LogFactory.getLog(ServerShutdownHandler.class);
  protected final ServerName serverName;
  protected final MasterServices services;
  protected final DeadServer deadServers;
  protected final boolean shouldSplitWal;
  protected final int regionAssignmentWaitTimeout;

  public ServerShutdownHandler(final Server server, final MasterServices services,
      final DeadServer deadServers, final ServerName serverName,
      final boolean shouldSplitWal) {
    this(server, services, deadServers, serverName, EventType.M_SERVER_SHUTDOWN,
        shouldSplitWal);
  }

  ServerShutdownHandler(final Server server, final MasterServices services,
      final DeadServer deadServers, final ServerName serverName, EventType type,
      final boolean shouldSplitWal) {
    super(server, type);
    this.serverName = serverName;
    this.server = server;
    this.services = services;
    this.deadServers = deadServers;
    if (!this.deadServers.isDeadServer(this.serverName)) {
      LOG.warn(this.serverName + " is NOT in deadservers; it should be!");
    }
    this.shouldSplitWal = shouldSplitWal;
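    // How long (ms) to wait for a region to come out of regions-in-transition
    // when distributed log replay is in use; defaults to 15s.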
    this.regionAssignmentWaitTimeout = server.getConfiguration().getInt(
        HConstants.LOG_REPLAY_WAIT_REGION_TIMEOUT, 15000);
  }

  @Override
  public String getInformativeName() {
    if (serverName != null) {
      return this.getClass().getSimpleName() + " for " + serverName;
    } else {
      return super.getInformativeName();
    }
  }

  /**
   * @return True if the server being processed was carrying hbase:meta.
   * Always false here; subclasses handling a meta-carrying server override it.
   */
  boolean isCarryingMeta() {
    return false;
  }

  @Override
  public String toString() {
    return getClass().getSimpleName() + "-" + serverName + "-" + getSeqid();
  }

  @Override
  public void process() throws IOException {
    boolean hasLogReplayWork = false;
    final ServerName serverName = this.serverName;
    try {
      // Do not block a MetaServerShutdownHandler worker thread here waiting on
      // hbase:meta availability: if each newly assigned meta carrier dies right
      // after opening hbase:meta, every worker in the pool ends up blocked and
      // shutdown processing deadlocks. So, if this server was carrying
      // hbase:meta, or if the AssignmentManager has not yet finished its
      // failover cleanup (region states not rebuilt), hand the server back to
      // the ServerManager to be re-queued and processed later.
      AssignmentManager am = services.getAssignmentManager();
      ServerManager serverManager = services.getServerManager();
      if (isCarryingMeta() // hbase:meta
          || !am.isFailoverCleanupDone()) {
        serverManager.processDeadServer(serverName, this.shouldSplitWal);
        return;
      }

      // Wait on hbase:meta to come online; we cannot read the dead server's
      // region list without it.
      Set<HRegionInfo> hris = null;
      while (!this.server.isStopped()) {
        try {
          server.getMetaTableLocator().waitMetaRegionLocation(server.getZooKeeper());
          if (BaseLoadBalancer.tablesOnMaster(server.getConfiguration())) {
            while (!this.server.isStopped() && serverManager.countOfRegionServers() < 2) {
              // The dead server may have been the only region server besides
              // the master; wait for another one to join before assigning.
              Thread.sleep(100);
            }
          }

          if (!this.server.isStopped()) {
            if (ConfigUtil.useZKForAssignment(server.getConfiguration())) {
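              // ZK-based assignment is in use: hbase:meta is the authoritative
              // record of which user regions the dead server was carrying.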
              hris = MetaTableAccessor.getServerUserRegions(this.server.getConnection(),
                  this.serverName).keySet();
            } else {
              // ZK-less assignment: the master's in-memory region states are
              // the authoritative record.
              hris = am.getRegionStates().getServerRegions(serverName);
            }
          }
          break;
        } catch (InterruptedException e) {
          Thread.currentThread().interrupt();
          throw (InterruptedIOException) new InterruptedIOException().initCause(e);
        } catch (IOException ioe) {
          LOG.info("Received exception accessing hbase:meta during server shutdown of " +
              serverName + ", retrying hbase:meta read", ioe);
        }
      }
      if (this.server.isStopped()) {
        throw new IOException("Server is stopped");
      }

      // Fix the log recovery mode now so it cannot flip between log splitting
      // and distributed log replay while we process this server's WALs.
      this.services.getMasterFileSystem().setLogRecoveryMode();
      boolean distributedLogReplay =
          (this.services.getMasterFileSystem().getLogRecoveryMode() == RecoveryMode.LOG_REPLAY);

      try {
        if (this.shouldSplitWal) {
          if (distributedLogReplay) {
            LOG.info("Mark regions in recovery for crashed server " + serverName +
                " before assignment; regions=" + hris);
            MasterFileSystem mfs = this.services.getMasterFileSystem();
            mfs.prepareLogReplay(serverName, hris);
          } else {
            LOG.info("Splitting logs for " + serverName +
                " before assignment; region count=" + (hris == null ? 0 : hris.size()));
            this.services.getMasterFileSystem().splitLog(serverName);
          }
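          // Record in region states that this server's logs have been split.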
          am.getRegionStates().logSplit(serverName);
        } else {
          LOG.info("Skipping log splitting for " + serverName);
        }
      } catch (IOException ioe) {
        resubmit(serverName, ioe);
      }
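
      // Also reassign any additional hbase:meta replicas (beyond the default
      // replica) that the dead server was carrying.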
      List<HRegionInfo> toAssignRegions = new ArrayList<HRegionInfo>();
      int replicaCount = services.getConfiguration().getInt(HConstants.META_REPLICAS_NUM,
          HConstants.DEFAULT_META_REPLICA_NUM);
      for (int i = 1; i < replicaCount; i++) {
        HRegionInfo metaHri =
            RegionReplicaUtil.getRegionInfoForReplica(HRegionInfo.FIRST_META_REGIONINFO, i);
        if (am.isCarryingMetaReplica(serverName, metaHri) ==
            AssignmentManager.ServerHostRegion.HOSTING_REGION) {
          LOG.info("Reassigning meta replica " + metaHri + " that was on " + serverName);
          toAssignRegions.add(metaHri);
        }
      }

      // Clean out anything in regions in transition on this server. Done after
      // log splitting so no edits are lost for regions that were mid-move.
      List<HRegionInfo> regionsInTransition = am.processServerShutdown(serverName);
      LOG.info("Reassigning " + ((hris == null) ? 0 : hris.size()) +
          " region(s) that " + (serverName == null ? "null" : serverName) +
          " was carrying (and " + regionsInTransition.size() +
          " region(s) that were opening on this server)");

      toAssignRegions.addAll(regionsInTransition);

      // Iterate the regions the dead server was carrying and decide which ones
      // still need to be assigned (skipping those already handled above).
      if (hris != null && !hris.isEmpty()) {
        RegionStates regionStates = am.getRegionStates();
        for (HRegionInfo hri : hris) {
          if (regionsInTransition.contains(hri)) {
            continue;
          }
          String encodedName = hri.getEncodedName();
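          // Serialize with other operations on this region (e.g. concurrent
          // assignment attempts) while we examine and update its state.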
          Lock lock = am.acquireRegionLock(encodedName);
          try {
            RegionState rit = regionStates.getRegionTransitionState(hri);
            if (processDeadRegion(hri, am)) {
              ServerName addressFromAM = regionStates.getRegionServerOfRegion(hri);
              if (addressFromAM != null && !addressFromAM.equals(this.serverName)) {
                // The region has already been opened on another server; leave
                // it alone.
                LOG.info("Skip assigning region " + hri.getRegionNameAsString()
                    + " because it has been opened in " + addressFromAM.getServerName());
                continue;
              }
              if (rit != null) {
                if (rit.getServerName() != null && !rit.isOnServer(serverName)) {
                  // In transition on another, live server; that server's
                  // transition will complete it.
                  LOG.info("Skip assigning region in transition on other server " + rit);
                  continue;
                }
                try {
                  // In transition on the dead server: clear any stale znode,
                  // mark the region offline and reassign it below.
                  LOG.info("Reassigning region with rs = " + rit + " and deleting zk node if exists");
                  ZKAssign.deleteNodeFailSilent(services.getZooKeeper(), hri);
                  regionStates.updateRegionState(hri, State.OFFLINE);
                } catch (KeeperException ke) {
                  this.server.abort("Unexpected ZK exception deleting unassigned node " + hri, ke);
                  return;
                }
              } else if (regionStates.isRegionInState(
                  hri, State.SPLITTING_NEW, State.MERGING_NEW)) {
                regionStates.updateRegionState(hri, State.OFFLINE);
              }
              toAssignRegions.add(hri);
            } else if (rit != null) {
              if ((rit.isPendingCloseOrClosing() || rit.isOffline())
                  && am.getTableStateManager().isTableState(hri.getTable(),
                      ZooKeeperProtos.Table.State.DISABLED, ZooKeeperProtos.Table.State.DISABLING) ||
                  am.getReplicasToClose().contains(hri)) {
                // The table is disabled or being disabled (or this is a replica
                // slated to close), so the region was being closed rather than
                // reopened: mark it offline and treat the close as done. The
                // rit may be stale, but clearing the closing znode is harmless.
                regionStates.updateRegionState(hri, State.OFFLINE);
                am.deleteClosingOrClosedNode(hri, rit.getServerName());
                am.offlineDisabledRegion(hri);
              } else {
                LOG.warn("THIS SHOULD NOT HAPPEN: unexpected region in transition "
                    + rit + " not to be assigned by SSH of server " + serverName);
              }
            }
          } finally {
            lock.unlock();
          }
        }
      }
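
      // Assign all the collected regions in one bulk call.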
      try {
        am.assign(toAssignRegions);
      } catch (InterruptedException ie) {
        LOG.error("Caught " + ie + " during round-robin assignment");
        throw (InterruptedIOException) new InterruptedIOException().initCause(ie);
      } catch (IOException ioe) {
        LOG.info("Caught " + ioe + " during region assignment, will retry");
        // Only split WALs here when in distributed log replay mode; otherwise
        // they were already split above.
        serverManager.processDeadServer(serverName,
            this.shouldSplitWal && distributedLogReplay);
        return;
      }

      if (this.shouldSplitWal && distributedLogReplay) {
        // Wait for the region assignments to complete before starting replay.
        for (HRegionInfo hri : toAssignRegions) {
          try {
            if (!am.waitOnRegionToClearRegionsInTransition(hri, regionAssignmentWaitTimeout)) {
              // Waiting above avoids replay RPCs being sent to the (still
              // registered) dead server location and timing out.
              LOG.warn("Region " + hri.getEncodedName()
                  + " didn't complete assignment in time");
            }
          } catch (InterruptedException ie) {
            throw new InterruptedIOException("Caught " + ie
                + " during waitOnRegionToClearRegionsInTransition");
          }
        }

        // Submit the replay of the dead server's WAL edits.
        this.services.getExecutorService().submit(
            new LogReplayHandler(this.server, this.services, this.deadServers, this.serverName));
        hasLogReplayWork = true;
      }
    } finally {
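      // However processing ended, mark this server finished so it does not
      // block the handling of other dead servers.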
      this.deadServers.finish(serverName);
    }

    if (!hasLogReplayWork) {
      LOG.info("Finished processing of shutdown of " + serverName);
    }
  }

  private void resubmit(final ServerName serverName, IOException ex) throws IOException {
    // Typecast to SSH so that it is the base ServerShutdownHandler instance
    // that gets resubmitted, not a derived instance.
    this.services.getExecutorService().submit((ServerShutdownHandler) this);
    this.deadServers.add(serverName);
    throw new IOException("failed log splitting for " + serverName + ", will retry", ex);
  }

  /**
   * Checks whether a region from a dead region server should be reassigned:
   * returns false if the region's table has been deleted, is disabled or
   * disabling, or if the region is an already-split parent.
   * @return true if the specified region should be assigned, false if not
   * @throws IOException
   */
  public static boolean processDeadRegion(HRegionInfo hri,
      AssignmentManager assignmentManager)
      throws IOException {
    boolean tablePresent = assignmentManager.getTableStateManager().isTablePresent(hri.getTable());
    if (!tablePresent) {
      LOG.info("The table " + hri.getTable()
          + " was deleted. Hence not proceeding.");
      return false;
    }

    boolean disabled = assignmentManager.getTableStateManager().isTableState(hri.getTable(),
        ZooKeeperProtos.Table.State.DISABLED);
    if (disabled) {
      LOG.info("The table " + hri.getTable()
          + " was disabled. Hence not proceeding.");
      return false;
    }
    if (hri.isOffline() && hri.isSplit()) {
      // A split parent and its daughters are inserted into hbase:meta as one
      // atomic operation, so if we see the parent as split, the daughters are
      // already accounted for among the dead server's regions; nothing to do.
      return false;
    }
    boolean disabling = assignmentManager.getTableStateManager().isTableState(hri.getTable(),
        ZooKeeperProtos.Table.State.DISABLING);
    if (disabling) {
      LOG.info("The table " + hri.getTable()
          + " is being disabled. Hence not assigning region " + hri.getEncodedName());
      return false;
    }
    return true;
  }
}