/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.master.procedure;

import static org.apache.hadoop.hbase.HConstants.DEFAULT_HBASE_SPLIT_COORDINATED_BY_ZK;
import static org.apache.hadoop.hbase.HConstants.HBASE_SPLIT_WAL_COORDINATED_BY_ZK;

import java.io.IOException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import org.apache.hadoop.hbase.DoNotRetryIOException;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.client.RegionInfo;
import org.apache.hadoop.hbase.client.RegionInfoBuilder;
import org.apache.hadoop.hbase.client.RegionReplicaUtil;
import org.apache.hadoop.hbase.client.TableState;
import org.apache.hadoop.hbase.master.MasterServices;
import org.apache.hadoop.hbase.master.MasterWalManager;
import org.apache.hadoop.hbase.master.SplitWALManager;
import org.apache.hadoop.hbase.master.assignment.AssignmentManager;
import org.apache.hadoop.hbase.master.assignment.RegionStateNode;
import org.apache.hadoop.hbase.master.assignment.TransitRegionStateProcedure;
import org.apache.hadoop.hbase.monitoring.MonitoredTask;
import org.apache.hadoop.hbase.monitoring.TaskMonitor;
import org.apache.hadoop.hbase.procedure2.Procedure;
import org.apache.hadoop.hbase.procedure2.ProcedureMetrics;
import org.apache.hadoop.hbase.procedure2.ProcedureStateSerializer;
import org.apache.hadoop.hbase.procedure2.ProcedureSuspendedException;
import org.apache.hadoop.hbase.procedure2.ProcedureYieldException;
import org.apache.hadoop.hbase.procedure2.StateMachineProcedure;
import org.apache.yetus.audience.InterfaceAudience;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.ServerCrashState;

/**
 * Handle crashed server. This is a port to ProcedureV2 of what used to be euphemistically called
 * ServerShutdownHandler.
 *
 * <p>The procedure flow varies depending on whether meta is assigned and on whether we are to
 * split logs.
 *
 * <p>We come in here after ServerManager has noticed a server has expired. Procedures
 * queued on the rpc should have been notified about the failure and should be concurrently
 * getting themselves ready to assign elsewhere.
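 *
 * <p>In outline: on SERVER_CRASH_START, if the crashed server was carrying hbase:meta we first
 * split the meta WALs and reassign hbase:meta; we then fetch the list of regions that were on
 * the server, split its WALs (inline via zk coordination or through child WAL-splitting
 * procedures, depending on configuration), reassign those regions, and finally clear the server
 * from the dead-servers processing list on SERVER_CRASH_FINISH.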
 */
@InterfaceAudience.Private
public class ServerCrashProcedure
    extends StateMachineProcedure<MasterProcedureEnv, ServerCrashState>
    implements ServerProcedureInterface {
  private static final Logger LOG = LoggerFactory.getLogger(ServerCrashProcedure.class);

  /**
   * Name of the crashed server to process.
   */
  private ServerName serverName;

  /**
   * Whether DeadServer knows that we are processing it.
   */
  private boolean notifiedDeadServer = false;

  /**
   * Regions that were on the crashed server.
   */
  private List<RegionInfo> regionsOnCrashedServer;

  private boolean carryingMeta = false;
  private boolean shouldSplitWal;
  private MonitoredTask status;
  // currentRunningState is updated when ServerCrashProcedure gets scheduled; progress updates
  // from child procedures do not change it because the actual state would be overwritten by its
  // next state.
  private ServerCrashState currentRunningState = getInitialState();
  /**
   * Call this constructor when queuing up a Procedure.
   * @param serverName Name of the crashed server.
   * @param shouldSplitWal True if we should split WALs as part of crashed server processing.
   * @param carryingMeta True if carrying hbase:meta table region.
   */
  public ServerCrashProcedure(final MasterProcedureEnv env, final ServerName serverName,
      final boolean shouldSplitWal, final boolean carryingMeta) {
    this.serverName = serverName;
    this.shouldSplitWal = shouldSplitWal;
    this.carryingMeta = carryingMeta;
    this.setOwner(env.getRequestUser());
  }

  /**
   * Used when deserializing from a procedure store; we'll construct one of these then call
   * {@link #deserializeStateData(ProcedureStateSerializer)}. Do not use directly.
   */
  public ServerCrashProcedure() {
  }

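  /**
   * @return True if this procedure is currently in the SERVER_CRASH_PROCESS_META state.
   */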
  public boolean isInRecoverMetaState() {
    return getCurrentState() == ServerCrashState.SERVER_CRASH_PROCESS_META;
  }

  @Override
  protected Flow executeFromState(MasterProcedureEnv env, ServerCrashState state)
      throws ProcedureSuspendedException, ProcedureYieldException {
    final MasterServices services = env.getMasterServices();
    final AssignmentManager am = env.getAssignmentManager();
    updateProgress(true);
    // HBASE-14802 If we have not yet notified that we are processing a dead server, do so now.
    // This adds the server to the DeadServer processing list but not to the DeadServers list.
    // The server gets removed from the processing list below on successful procedure finish.
    if (!notifiedDeadServer) {
      services.getServerManager().getDeadServers().processing(serverName);
      notifiedDeadServer = true;
    }

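    // Only the states that run before hbase:meta is back online (start, meta WAL split, meta
    // assign) may proceed immediately; every later state must wait for hbase:meta to be loaded.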
    switch (state) {
      case SERVER_CRASH_START:
      case SERVER_CRASH_SPLIT_META_LOGS:
      case SERVER_CRASH_DELETE_SPLIT_META_WALS_DIR:
      case SERVER_CRASH_ASSIGN_META:
        break;
      default:
        // If hbase:meta is not assigned, yield.
        if (env.getAssignmentManager().waitMetaLoaded(this)) {
          throw new ProcedureSuspendedException();
        }
    }
    try {
      switch (state) {
        case SERVER_CRASH_START:
          LOG.info("Start " + this);
          // If carrying meta, process it first. Else, get list of regions on crashed server.
          if (this.carryingMeta) {
            setNextState(ServerCrashState.SERVER_CRASH_SPLIT_META_LOGS);
          } else {
            setNextState(ServerCrashState.SERVER_CRASH_GET_REGIONS);
          }
          break;
        case SERVER_CRASH_SPLIT_META_LOGS:
          if (env.getMasterConfiguration().getBoolean(HBASE_SPLIT_WAL_COORDINATED_BY_ZK,
              DEFAULT_HBASE_SPLIT_COORDINATED_BY_ZK)) {
            zkCoordinatedSplitMetaLogs(env);
            setNextState(ServerCrashState.SERVER_CRASH_ASSIGN_META);
          } else {
            am.getRegionStates().metaLogSplitting(serverName);
            addChildProcedure(createSplittingWalProcedures(env, true));
            setNextState(ServerCrashState.SERVER_CRASH_DELETE_SPLIT_META_WALS_DIR);
          }
          break;
        case SERVER_CRASH_DELETE_SPLIT_META_WALS_DIR:
          if (isSplittingDone(env, true)) {
            setNextState(ServerCrashState.SERVER_CRASH_ASSIGN_META);
            am.getRegionStates().metaLogSplit(serverName);
          } else {
            setNextState(ServerCrashState.SERVER_CRASH_SPLIT_META_LOGS);
          }
          break;
        case SERVER_CRASH_ASSIGN_META:
          assignRegions(env, Arrays.asList(RegionInfoBuilder.FIRST_META_REGIONINFO));
          setNextState(ServerCrashState.SERVER_CRASH_GET_REGIONS);
          break;
        case SERVER_CRASH_GET_REGIONS:
          this.regionsOnCrashedServer = getRegionsOnCrashedServer(env);
          // Where to go next? Depends on whether we should split logs at all or
          // if we should do distributed log splitting.
          if (regionsOnCrashedServer != null) {
            LOG.info("{} had {} regions", serverName, regionsOnCrashedServer.size());
            if (LOG.isTraceEnabled()) {
              this.regionsOnCrashedServer.stream().forEach(ri -> LOG.trace(ri.getShortNameToLog()));
            }
          }
          if (!this.shouldSplitWal) {
            setNextState(ServerCrashState.SERVER_CRASH_ASSIGN);
          } else {
            setNextState(ServerCrashState.SERVER_CRASH_SPLIT_LOGS);
          }
          break;
        case SERVER_CRASH_SPLIT_LOGS:
          if (env.getMasterConfiguration().getBoolean(HBASE_SPLIT_WAL_COORDINATED_BY_ZK,
            DEFAULT_HBASE_SPLIT_COORDINATED_BY_ZK)) {
            zkCoordinatedSplitLogs(env);
            setNextState(ServerCrashState.SERVER_CRASH_ASSIGN);
          } else {
            am.getRegionStates().logSplitting(this.serverName);
            addChildProcedure(createSplittingWalProcedures(env, false));
            setNextState(ServerCrashState.SERVER_CRASH_DELETE_SPLIT_WALS_DIR);
          }
          break;
        case SERVER_CRASH_DELETE_SPLIT_WALS_DIR:
          if (isSplittingDone(env, false)) {
            cleanupSplitDir(env);
            setNextState(ServerCrashState.SERVER_CRASH_ASSIGN);
            am.getRegionStates().logSplit(this.serverName);
          } else {
            setNextState(ServerCrashState.SERVER_CRASH_SPLIT_LOGS);
          }
          break;
        case SERVER_CRASH_ASSIGN:
          // If no regions to assign, skip assign and skip to the finish.
          // Filter out meta regions. Those are handled elsewhere in this procedure.
          // Filter changes this.regionsOnCrashedServer.
          if (filterDefaultMetaRegions()) {
            if (LOG.isTraceEnabled()) {
              LOG
                .trace("Assigning regions " + RegionInfo.getShortNameToLog(regionsOnCrashedServer) +
                  ", " + this + "; cycles=" + getCycles());
            }
            assignRegions(env, regionsOnCrashedServer);
          }
          setNextState(ServerCrashState.SERVER_CRASH_FINISH);
          break;
        case SERVER_CRASH_HANDLE_RIT2:
          // Noop. Left in place because we used to call handleRIT here for a second time
          // but it is no longer necessary since HBASE-20634.
          setNextState(ServerCrashState.SERVER_CRASH_FINISH);
          break;
        case SERVER_CRASH_FINISH:
          LOG.info("removed crashed server {} after splitting done", serverName);
          services.getAssignmentManager().getRegionStates().removeServer(serverName);
          services.getServerManager().getDeadServers().finish(serverName);
          updateProgress(true);
          return Flow.NO_MORE_STATE;
        default:
          throw new UnsupportedOperationException("unhandled state=" + state);
      }
    } catch (IOException e) {
      LOG.warn("Failed state=" + state + ", retry " + this + "; cycles=" + getCycles(), e);
    }
    return Flow.HAS_MORE_STATE;
  }

  /**
   * @return List of Regions on crashed server.
   */
  List<RegionInfo> getRegionsOnCrashedServer(MasterProcedureEnv env) {
    return env.getMasterServices().getAssignmentManager().getRegionsOnServer(serverName);
  }

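  /**
   * Delete this crashed server's WAL directory once splitting has completed, first archiving any
   * left-over hbase:meta WALs when this server was not carrying hbase:meta.
   */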
  private void cleanupSplitDir(MasterProcedureEnv env) {
    SplitWALManager splitWALManager = env.getMasterServices().getSplitWALManager();
    try {
      if (!this.carryingMeta) {
        // If we are NOT carrying hbase:meta, check whether there are any left-over hbase:meta WAL
        // files from an old hbase:meta tenancy on this server; clean these up, if any, before
        // trying to remove the WAL directory of this server or the removal will fail. See the
        // archiveMetaLog comment for more details on this condition.
        env.getMasterServices().getMasterWalManager().archiveMetaLog(this.serverName);
      }
      splitWALManager.deleteWALDir(serverName);
    } catch (IOException e) {
      LOG.warn("Remove WAL directory for {} failed, ignore...{}", serverName, e.getMessage());
    }
  }

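  /**
   * @return True when there are no more WALs left to split for this crashed server (meta WALs
   *         only when <code>splitMeta</code> is true); also false when fetching the WAL list
   *         fails, so the caller retries.
   */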
  private boolean isSplittingDone(MasterProcedureEnv env, boolean splitMeta) {
    SplitWALManager splitWALManager = env.getMasterServices().getSplitWALManager();
    try {
      int wals = splitWALManager.getWALsToSplit(serverName, splitMeta).size();
      LOG.debug("Check if {} WAL splitting is done? wals={}, meta={}", serverName, wals, splitMeta);
      return wals == 0;
    } catch (IOException e) {
      LOG.warn("Get WALs of {} failed, retry...", serverName, e);
      return false;
    }
  }

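  /**
   * Create the child procedures that will split this crashed server's WALs (meta WALs only when
   * <code>splitMeta</code> is true).
   */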
  private Procedure[] createSplittingWalProcedures(MasterProcedureEnv env, boolean splitMeta)
      throws IOException {
    LOG.info("Splitting WALs {}, isMeta: {}", this, splitMeta);
    SplitWALManager splitWALManager = env.getMasterServices().getSplitWALManager();
    List<Procedure> procedures = splitWALManager.splitWALs(serverName, splitMeta);
    return procedures.toArray(new Procedure[procedures.size()]);
  }

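  /**
   * Strip default-replica hbase:meta regions out of {@link #regionsOnCrashedServer}; those are
   * handled by the meta-specific states of this procedure.
   * @return True if any regions remain to be assigned.
   */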
  private boolean filterDefaultMetaRegions() {
    if (regionsOnCrashedServer == null) {
      return false;
    }
    regionsOnCrashedServer.removeIf(this::isDefaultMetaRegion);
    return !regionsOnCrashedServer.isEmpty();
  }

  private boolean isDefaultMetaRegion(RegionInfo hri) {
    return hri.isMetaRegion() && RegionReplicaUtil.isDefaultReplica(hri);
  }

  /**
   * Split hbase:meta logs using 'classic' zk-based coordination.
   * Superseded by procedure-based WAL splitting.
   * @see #createSplittingWalProcedures(MasterProcedureEnv, boolean)
   */
  private void zkCoordinatedSplitMetaLogs(MasterProcedureEnv env) throws IOException {
    LOG.debug("Splitting meta WALs {}", this);
    MasterWalManager mwm = env.getMasterServices().getMasterWalManager();
    AssignmentManager am = env.getMasterServices().getAssignmentManager();
    am.getRegionStates().metaLogSplitting(serverName);
    mwm.splitMetaLog(serverName);
    am.getRegionStates().metaLogSplit(serverName);
    LOG.debug("Done splitting meta WALs {}", this);
  }

  /**
   * Split logs using 'classic' zk-based coordination.
   * Superseded by procedure-based WAL splitting.
   * @see #createSplittingWalProcedures(MasterProcedureEnv, boolean)
   */
  private void zkCoordinatedSplitLogs(final MasterProcedureEnv env) throws IOException {
    LOG.debug("Splitting WALs {}", this);
    MasterWalManager mwm = env.getMasterServices().getMasterWalManager();
    AssignmentManager am = env.getMasterServices().getAssignmentManager();
    // TODO: For Matteo. Below BLOCKs!!!! Redo so can relinquish executor while it is running.
    // PROBLEM!!! WE BLOCK HERE. Can block for hours if hundreds of WALs to split and hundreds
    // of SCPs running because big cluster crashed down.
    am.getRegionStates().logSplitting(this.serverName);
    mwm.splitLog(this.serverName);
    if (!carryingMeta) {
      mwm.archiveMetaLog(this.serverName);
    }
    am.getRegionStates().logSplit(this.serverName);
    LOG.debug("Done splitting WALs {}", this);
  }

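  /**
   * Update the MonitoredTask status for this crashed-server processing: the task is created on
   * the first call and marked complete once we have reached SERVER_CRASH_FINISH.
   * @param updateState True if {@link #currentRunningState} should be refreshed from the current
   *          procedure state before reporting.
   */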
  void updateProgress(boolean updateState) {
    String msg = "Processing ServerCrashProcedure of " + serverName;
    if (status == null) {
      status = TaskMonitor.get().createStatus(msg);
      return;
    }
    if (currentRunningState == ServerCrashState.SERVER_CRASH_FINISH) {
      status.markComplete(msg + " done");
      return;
    }
    if (updateState) {
      currentRunningState = getCurrentState();
    }
    int childrenLatch = getChildrenLatch();
    status.setStatus(msg + " current State " + currentRunningState + (childrenLatch > 0?
      "; remaining num of running child procedures = " + childrenLatch: ""));
  }

  @Override
  protected void rollbackState(MasterProcedureEnv env, ServerCrashState state) throws IOException {
    // Can't rollback.
    throw new UnsupportedOperationException("unhandled state=" + state);
  }

  @Override
  protected ServerCrashState getState(int stateId) {
    return ServerCrashState.forNumber(stateId);
  }

  @Override
  protected int getStateId(ServerCrashState state) {
    return state.getNumber();
  }

  @Override
  protected ServerCrashState getInitialState() {
    return ServerCrashState.SERVER_CRASH_START;
  }

  @Override
  protected boolean abort(MasterProcedureEnv env) {
    // TODO
    return false;
  }

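  // Take the exclusive lock on the crashed server so no other procedure operating on this server
  // can run concurrently; see holdLock below, which keeps the lock for the procedure's lifetime.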
  @Override
  protected LockState acquireLock(final MasterProcedureEnv env) {
    if (env.getProcedureScheduler().waitServerExclusiveLock(this, getServerName())) {
      return LockState.LOCK_EVENT_WAIT;
    }
    return LockState.LOCK_ACQUIRED;
  }

  @Override
  protected void releaseLock(final MasterProcedureEnv env) {
    env.getProcedureScheduler().wakeServerExclusiveLock(this, getServerName());
  }

  @Override
  public void toStringClassDetails(StringBuilder sb) {
    sb.append(getProcName());
    sb.append(", splitWal=");
    sb.append(shouldSplitWal);
    sb.append(", meta=");
    sb.append(carryingMeta);
  }

  @Override
  public String getProcName() {
    return getClass().getSimpleName() + " " + this.serverName;
  }

  @Override
  protected void serializeStateData(ProcedureStateSerializer serializer)
      throws IOException {
    super.serializeStateData(serializer);

    MasterProcedureProtos.ServerCrashStateData.Builder state =
      MasterProcedureProtos.ServerCrashStateData.newBuilder().
      setServerName(ProtobufUtil.toServerName(this.serverName)).
      setCarryingMeta(this.carryingMeta).
      setShouldSplitWal(this.shouldSplitWal);
    if (this.regionsOnCrashedServer != null && !this.regionsOnCrashedServer.isEmpty()) {
      for (RegionInfo hri: this.regionsOnCrashedServer) {
        state.addRegionsOnCrashedServer(ProtobufUtil.toRegionInfo(hri));
      }
    }
    serializer.serialize(state.build());
  }

  @Override
  protected void deserializeStateData(ProcedureStateSerializer serializer)
      throws IOException {
    super.deserializeStateData(serializer);

    MasterProcedureProtos.ServerCrashStateData state =
        serializer.deserialize(MasterProcedureProtos.ServerCrashStateData.class);
    this.serverName = ProtobufUtil.toServerName(state.getServerName());
    this.carryingMeta = state.hasCarryingMeta()? state.getCarryingMeta(): false;
    // shouldSplitWAL has a default over in pb so this invocation will always work.
    this.shouldSplitWal = state.getShouldSplitWal();
    int size = state.getRegionsOnCrashedServerCount();
    if (size > 0) {
      this.regionsOnCrashedServer = new ArrayList<>(size);
      for (org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo ri:
          state.getRegionsOnCrashedServerList()) {
        this.regionsOnCrashedServer.add(ProtobufUtil.toRegionInfo(ri));
      }
    }
    updateProgress(false);
  }

  @Override
  public ServerName getServerName() {
    return this.serverName;
  }

  @Override
  public boolean hasMetaTableRegion() {
    return this.carryingMeta;
  }

  @Override
  public ServerOperationType getServerOperationType() {
    return ServerOperationType.CRASH_HANDLER;
  }

  @Override
  protected boolean shouldWaitClientAck(MasterProcedureEnv env) {
    // The operation is triggered internally on the server;
    // the client does not know about this procedure.
    return false;
  }

  /**
   * Moved out here so it can be overridden by the HBCK fix-up SCP to be less strict about what
   * it will tolerate as a 'match'.
   * @return True if the region location in <code>rsn</code> matches that of this crashed server.
   */
  protected boolean isMatchingRegionLocation(RegionStateNode rsn) {
    return this.serverName.equals(rsn.getRegionLocation());
  }

  /**
   * Assign the regions on the crashed RS to other RSes.
   * <p/>
   * In this method we go through all the RegionStateNodes of the given regions to find out
   * whether there is already a TRSP for the region; if so we interrupt it and let it retry on
   * another server, otherwise we schedule a TRSP to bring the region online.
   * <p/>
   * We also check whether the table for a region is enabled; if not, we skip assigning it.
   */
  private void assignRegions(MasterProcedureEnv env, List<RegionInfo> regions) throws IOException {
    AssignmentManager am = env.getMasterServices().getAssignmentManager();
    for (RegionInfo region : regions) {
      RegionStateNode regionNode = am.getRegionStates().getOrCreateRegionStateNode(region);
      regionNode.lock();
      try {
        // This is possible, as when a server is dead, TRSP will fail to schedule a RemoteProcedure
        // and then try to assign the region to a new RS. And before it has updated the region
        // location to the new RS, we may have already called am.getRegionsOnServer, so we still
        // consider the region to be on this crashed server. Then before we arrive here, the
        // TRSP could have updated the region location, or even finished itself, so the region is
        // no longer on this crashed server. We should not try to assign it again. Please
        // see HBASE-23594 for more details.
        // UPDATE: HBCKServerCrashProcedure overrides isMatchingRegionLocation; this check can get
        // in the way of our clearing out 'Unknown Servers'.
        if (!isMatchingRegionLocation(regionNode)) {
          // See HBASE-24117, though we have already changed the shutdown order, it is still worth
          // double checking here to confirm that we do not skip assignment incorrectly.
          if (!am.isRunning()) {
            throw new DoNotRetryIOException(
              "AssignmentManager has been stopped, can not process assignment any more");
          }
          LOG.info("{} found {} whose regionLocation no longer matches {}, skipping assign...",
            this, regionNode, serverName);
          continue;
        }
        if (regionNode.getProcedure() != null) {
          LOG.info("{} found RIT {}; {}", this, regionNode.getProcedure(), regionNode);
          regionNode.getProcedure().serverCrashed(env, regionNode, getServerName());
          continue;
        }
        if (env.getMasterServices().getTableStateManager()
          .isTableState(regionNode.getTable(), TableState.State.DISABLING)) {
          // We need to change the state here otherwise the TRSP scheduled by DTP will try to
          // close the region from a dead server and will never succeed. Please see HBASE-23636
          // for more details.
          env.getAssignmentManager().regionClosedAbnormally(regionNode);
          LOG.info("{} found table disabling for region {}, set its state to ABNORMALLY_CLOSED.",
            this, regionNode);
          continue;
        }
        if (env.getMasterServices().getTableStateManager()
          .isTableState(regionNode.getTable(), TableState.State.DISABLED)) {
          // This should not happen: table disabled but still has regions on the server.
          LOG.warn("Found table disabled for region {}, procDetails: {}", regionNode, this);
          continue;
        }
        // Force assigning to a new candidate server; see HBASE-23035 for more details.
        TransitRegionStateProcedure proc =
          TransitRegionStateProcedure.assign(env, region, true, null);
        regionNode.setProcedure(proc);
        addChildProcedure(proc);
      } finally {
        regionNode.unlock();
      }
    }
  }

  @Override
  protected ProcedureMetrics getProcedureMetrics(MasterProcedureEnv env) {
    return env.getMasterServices().getMasterMetrics().getServerCrashProcMetrics();
  }

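  // Hold the exclusive server lock for the whole life of this procedure instead of re-acquiring
  // it around each step.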
  @Override
  protected boolean holdLock(MasterProcedureEnv env) {
    return true;
  }

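  /**
   * Refresh the MonitoredTask of the parent procedure identified by <code>parentId</code>, if
   * that parent is a ServerCrashProcedure; a no-op otherwise. Meant for child procedures that
   * want to report progress.
   */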
  public static void updateProgress(MasterProcedureEnv env, long parentId) {
    if (parentId == NO_PROC_ID) {
      return;
    }
    Procedure parentProcedure =
        env.getMasterServices().getMasterProcedureExecutor().getProcedure(parentId);
    if (parentProcedure != null && parentProcedure instanceof ServerCrashProcedure) {
      ((ServerCrashProcedure) parentProcedure).updateProgress(false);
    }
  }
}