/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.master.procedure;

import java.io.IOException;
import java.lang.Thread.UncaughtExceptionHandler;
import java.net.ConnectException;
import java.net.UnknownHostException;
import java.util.List;
import java.util.Set;
import java.util.concurrent.TimeUnit;
import javax.security.sasl.SaslException;
import org.apache.hadoop.hbase.CallQueueTooBigException;
import org.apache.hadoop.hbase.DoNotRetryIOException;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.client.AsyncRegionServerAdmin;
import org.apache.hadoop.hbase.client.RegionInfo;
import org.apache.hadoop.hbase.exceptions.ConnectionClosedException;
import org.apache.hadoop.hbase.ipc.RpcConnectionConstants;
import org.apache.hadoop.hbase.ipc.ServerNotRunningYetException;
import org.apache.hadoop.hbase.master.MasterServices;
import org.apache.hadoop.hbase.master.ServerListener;
import org.apache.hadoop.hbase.master.ServerManager;
import org.apache.hadoop.hbase.procedure2.ProcedureExecutor;
import org.apache.hadoop.hbase.procedure2.RemoteProcedureDispatcher;
import org.apache.hadoop.hbase.regionserver.RegionServerStoppedException;
import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
import org.apache.hadoop.hbase.util.FutureUtils;
import org.apache.hadoop.ipc.RemoteException;
import org.apache.yetus.audience.InterfaceAudience;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import org.apache.hbase.thirdparty.com.google.common.collect.ArrayListMultimap;
import org.apache.hbase.thirdparty.com.google.protobuf.ByteString;

import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
import org.apache.hadoop.hbase.shaded.protobuf.RequestConverter;
import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ExecuteProceduresRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ExecuteProceduresResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.OpenRegionRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.RemoteProcedureRequest;

/**
 * A remote procedure dispatcher for regionservers.
 */
@InterfaceAudience.Private
public class RSProcedureDispatcher extends RemoteProcedureDispatcher<MasterProcedureEnv, ServerName>
  implements ServerListener {
  private static final Logger LOG = LoggerFactory.getLogger(RSProcedureDispatcher.class);

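  /**
   * How long, in milliseconds, to keep retrying a region server that keeps answering with
   * ServerNotRunningYetException before giving up on it. Defaults to one minute.
   */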
  public static final String RS_RPC_STARTUP_WAIT_TIME_CONF_KEY =
    "hbase.regionserver.rpc.startup.waittime";
  private static final int DEFAULT_RS_RPC_STARTUP_WAIT_TIME = 60000;

  protected final MasterServices master;
  private final long rsStartupWaitTime;
  private MasterProcedureEnv procedureEnv;

  public RSProcedureDispatcher(final MasterServices master) {
    super(master.getConfiguration());

    this.master = master;
    this.rsStartupWaitTime = master.getConfiguration().getLong(RS_RPC_STARTUP_WAIT_TIME_CONF_KEY,
      DEFAULT_RS_RPC_STARTUP_WAIT_TIME);
  }

  @Override
  protected UncaughtExceptionHandler getUncaughtExceptionHandler() {
    return new UncaughtExceptionHandler() {

      @Override
      public void uncaughtException(Thread t, Throwable e) {
        LOG.error("Unexpected error caught, this may cause the procedure to hang forever", e);
      }
    };
  }

  @Override
  public boolean start() {
    if (!super.start()) {
      return false;
    }
    setTimeoutExecutorUncaughtExceptionHandler(this::abort);
    if (master.isStopped()) {
      LOG.debug("Stopped");
      return false;
    }
    // Around startup, if initialization failed, some of the fields below may be reset to null,
    // so guard against NPEs here.
    ServerManager sm = master.getServerManager();
    if (sm == null) {
      LOG.debug("ServerManager is null");
      return false;
    }
    sm.registerListener(this);
    ProcedureExecutor<MasterProcedureEnv> pe = master.getMasterProcedureExecutor();
    if (pe == null) {
      LOG.debug("ProcedureExecutor is null");
      return false;
    }
    this.procedureEnv = pe.getEnvironment();
    if (this.procedureEnv == null) {
      LOG.debug("ProcedureEnv is null; stopping={}", master.isStopping());
      return false;
    }
    try {
      for (ServerName serverName : sm.getOnlineServersList()) {
        addNode(serverName);
      }
    } catch (Exception e) {
      LOG.info("Failed start", e);
      return false;
    }
    return true;
  }

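  // Uncaught exception handler for the timeout executor (see start()): an unexpected error there
  // can leave procedures hanging, so abort the master unless it is already stopping or stopped.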
  private void abort(Thread t, Throwable e) {
    LOG.error("Caught error", e);
    if (!master.isStopped() && !master.isStopping() && !master.isAborted()) {
      master.abort("Aborting master", e);
    }
  }

  @Override
  public boolean stop() {
    if (!super.stop()) {
      return false;
    }

    master.getServerManager().unregisterListener(this);
    return true;
  }

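  /**
   * Dispatches the pending remote procedures for the given server. If the server is no longer
   * online, the procedures are failed immediately instead of attempting an RPC.
   */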
  @Override
  protected void remoteDispatch(final ServerName serverName,
    final Set<RemoteProcedure> remoteProcedures) {
    if (!master.getServerManager().isServerOnline(serverName)) {
      // fail fast
      submitTask(new DeadRSRemoteCall(serverName, remoteProcedures));
    } else {
      submitTask(new ExecuteProceduresRemoteCall(serverName, remoteProcedures));
    }
  }

  @Override
  protected void abortPendingOperations(final ServerName serverName,
    final Set<RemoteProcedure> operations) {
    // TODO: Replace with a ServerNotOnlineException()
    final IOException e = new DoNotRetryIOException("server not online " + serverName);
    for (RemoteProcedure proc : operations) {
      proc.remoteCallFailed(procedureEnv, serverName, e);
    }
  }

  @Override
  public void serverAdded(final ServerName serverName) {
    addNode(serverName);
  }

  @Override
  public void serverRemoved(final ServerName serverName) {
    removeNode(serverName);
  }

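  /**
   * Receives the operations grouped by type from splitAndResolveOperation and turns them into the
   * corresponding parts of the RPC request.
   */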
  private interface RemoteProcedureResolver {
    void dispatchOpenRequests(MasterProcedureEnv env, List<RegionOpenOperation> operations);

    void dispatchCloseRequests(MasterProcedureEnv env, List<RegionCloseOperation> operations);

    void dispatchServerOperations(MasterProcedureEnv env, List<ServerOperation> operations);
  }

  /**
   * Fetches {@link org.apache.hadoop.hbase.procedure2.RemoteProcedureDispatcher.RemoteOperation}s
   * from the given {@code remoteProcedures} and groups them by class of the returned operation.
   * Then {@code resolver} is used to dispatch {@link RegionOpenOperation}s,
   * {@link RegionCloseOperation}s and {@link ServerOperation}s.
   * @param serverName RegionServer to which the remote operations are sent
   * @param operations Remote procedures which are dispatched to the given server
   * @param resolver   Used to dispatch remote procedures to the given server.
   */
  public void splitAndResolveOperation(ServerName serverName, Set<RemoteProcedure> operations,
    RemoteProcedureResolver resolver) {
    MasterProcedureEnv env = master.getMasterProcedureExecutor().getEnvironment();
    ArrayListMultimap<Class<?>, RemoteOperation> reqsByType =
      buildAndGroupRequestByType(env, serverName, operations);

    List<RegionOpenOperation> openOps = fetchType(reqsByType, RegionOpenOperation.class);
    if (!openOps.isEmpty()) {
      resolver.dispatchOpenRequests(env, openOps);
    }

    List<RegionCloseOperation> closeOps = fetchType(reqsByType, RegionCloseOperation.class);
    if (!closeOps.isEmpty()) {
      resolver.dispatchCloseRequests(env, closeOps);
    }

    List<ServerOperation> refreshOps = fetchType(reqsByType, ServerOperation.class);
    if (!refreshOps.isEmpty()) {
      resolver.dispatchServerOperations(env, refreshOps);
    }

    if (!reqsByType.isEmpty()) {
      LOG.warn("unknown request type in the queue: {}", reqsByType);
    }
  }

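  /**
   * Remote call used when the target server is already known to be offline: it skips the RPC and
   * immediately fails every remote procedure with a {@link RegionServerStoppedException}.
   */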
  private class DeadRSRemoteCall extends ExecuteProceduresRemoteCall {

    public DeadRSRemoteCall(ServerName serverName, Set<RemoteProcedure> remoteProcedures) {
      super(serverName, remoteProcedures);
    }

    @Override
    public void run() {
      remoteCallFailed(procedureEnv,
        new RegionServerStoppedException("Server " + getServerName() + " is not online"));
    }
  }

  // ==========================================================================
  // Compatibility calls
  // ==========================================================================
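  /**
   * Bundles all remote procedures queued for one region server into a single
   * {@link ExecuteProceduresRequest}, sends it, and on retriable failures re-submits itself with a
   * simple quadratic backoff (see scheduleForRetry).
   */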
  protected class ExecuteProceduresRemoteCall implements RemoteProcedureResolver, Runnable {

    private final ServerName serverName;

    private final Set<RemoteProcedure> remoteProcedures;

    private int numberOfAttemptsSoFar = 0;
    private long maxWaitTime = -1;

    private final long rsRpcRetryInterval;
    private static final String RS_RPC_RETRY_INTERVAL_CONF_KEY =
      "hbase.regionserver.rpc.retry.interval";
    private static final int DEFAULT_RS_RPC_RETRY_INTERVAL = 100;

    /**
     * Config to determine the retry limit while executing remote regionserver procedures. This
     * retry limit applies only to specific errors which could otherwise leave the remote procedure
     * stuck for several minutes.
     */
    private static final String RS_REMOTE_PROC_FAIL_FAST_LIMIT =
      "hbase.master.rs.remote.proc.fail.fast.limit";
    /**
     * The default retry limit. Waiting for more than {@value} attempts is not going to help much
     * for genuine connectivity errors, so fail fast after {@value} retries.
     */
    private static final int DEFAULT_RS_REMOTE_PROC_RETRY_LIMIT = 5;

    private final int failFastRetryLimit;

    private ExecuteProceduresRequest.Builder request = null;

    public ExecuteProceduresRemoteCall(final ServerName serverName,
      final Set<RemoteProcedure> remoteProcedures) {
      this.serverName = serverName;
      this.remoteProcedures = remoteProcedures;
      this.rsRpcRetryInterval = master.getConfiguration().getLong(RS_RPC_RETRY_INTERVAL_CONF_KEY,
        DEFAULT_RS_RPC_RETRY_INTERVAL);
      this.failFastRetryLimit = master.getConfiguration().getInt(RS_REMOTE_PROC_FAIL_FAST_LIMIT,
        DEFAULT_RS_REMOTE_PROC_RETRY_LIMIT);
    }

    private AsyncRegionServerAdmin getRsAdmin() throws IOException {
      return master.getAsyncClusterConnection().getRegionServerAdmin(serverName);
    }

    protected final ServerName getServerName() {
      return serverName;
    }

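    /**
     * Decides how to handle a failed call. Returns true if a retry has been scheduled; returns
     * false if the caller should give up and fail the remote procedures via remoteCallFailed.
     */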
    private boolean scheduleForRetry(IOException e) {
      LOG.debug("Request to {} failed, try={}", serverName, numberOfAttemptsSoFar, e);
      // Should we wait a little before retrying? Yes, if the server is still starting up.
      if (e instanceof ServerNotRunningYetException) {
        long remainingTime = getMaxWaitTime() - EnvironmentEdgeManager.currentTime();
        if (remainingTime > 0) {
          LOG.warn("Waiting a little before retrying {}, try={}, can wait up to {}ms", serverName,
            numberOfAttemptsSoFar, remainingTime);
          numberOfAttemptsSoFar++;
          // Retry every rsRpcRetryInterval millis up to the maximum wait time.
          submitTask(this, rsRpcRetryInterval, TimeUnit.MILLISECONDS);
          return true;
        }
        LOG.warn("{} has been throwing ServerNotRunningYetException for more than {}ms;"
          + " trying another server", serverName, rsStartupWaitTime);
        return false;
      }
      if (e instanceof DoNotRetryIOException) {
        LOG.warn("{} tells us DoNotRetry due to {}, try={}, give up", serverName, e.toString(),
          numberOfAttemptsSoFar);
        return false;
      }
      // This category of exceptions is thrown by the rpc framework in situations where we can be
      // sure the call never reached the region server, so it is safe to mark the operation as
      // failed. In particular, for a region open it is better to choose another region server.
      // Note that it is only safe to give up if this is the very first attempt: on a later attempt
      // the region server may already have accepted an earlier request whose response was lost to
      // a network error, and giving up at that point could lead to a double assign...
      if (numberOfAttemptsSoFar == 0 && unableToConnectToServer(e)) {
        return false;
      }

      // Check whether the number of attempts has crossed the retry limit and whether this error
      // type allows fail-fast.
      if (numberOfAttemptsSoFar >= failFastRetryLimit - 1 && isErrorTypeFailFast(e)) {
        LOG
          .warn("Number of retries {} exceeded limit {} for the given error type. Scheduling server"
            + " crash for {}", numberOfAttemptsSoFar + 1, failFastRetryLimit, serverName, e);
        // Expiring the server will schedule an SCP and also reject any regionserver report that
        // the regionserver somehow still manages to send to the master. The master rejects the
        // report by throwing YouAreDeadException, which eventually makes the regionserver abort.
        // This also removes "serverName" from the ServerManager's onlineServers map.
        master.getServerManager().expireServer(serverName);
        return false;
      }
      // For other exception types keep retrying, but only while the region server is still
      // considered online; once it has been marked dead, give up.
      if (!master.getServerManager().isServerOnline(serverName)) {
        LOG.warn("Request to {} failed due to {}, try={} and the server is not online, give up",
          serverName, e.toString(), numberOfAttemptsSoFar);
        return false;
      }
      if (e instanceof RegionServerStoppedException) {
        // A better approach would be to return true here to stop the retry loop, and then schedule
        // a background task to check whether the region server is dead; once it is confirmed dead,
        // call remoteCallFailed to tell the upper layer. Retrying here does not produce an
        // incorrect result, it just wastes some resources.
        LOG.warn("{} is aborted or stopped, for safety we still need to"
          + " wait until it is fully dead, try={}", serverName, numberOfAttemptsSoFar);
      } else {
        LOG.warn("request to {} failed due to {}, try={}, retrying..., request params: {}",
          serverName, e.toString(), numberOfAttemptsSoFar, request.build());
      }
      numberOfAttemptsSoFar++;
      // Add some backoff as the attempt count rises, otherwise a stuck condition would flood the
      // logs with failed attempts. None of our backoff classes -- RetryCounter or
      // ClientBackoffPolicy -- fit nicely here, so do something simple: wait
      // rsRpcRetryInterval * retry^2 millis on each try, capped at 10 seconds (we do not want to
      // back off too much in case the situation changes).
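      // For example, with the default 100 ms retry interval the delays are roughly 100 ms,
      // 400 ms, 900 ms, ... until they reach the 10 second cap.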
      submitTask(this,
        Math.min(
          rsRpcRetryInterval * ((long) this.numberOfAttemptsSoFar * this.numberOfAttemptsSoFar),
          10 * 1000),
        TimeUnit.MILLISECONDS);
      return true;
    }

    /**
     * The category of exceptions where we can be sure that the request has not been received
     * and/or processed by the target regionserver, and hence it is safe to choose a different
     * regionserver as the target.
     * @param e IOException thrown by the underlying rpc framework.
     * @return true if the exception belongs to the category where the regionserver has not yet
     *         received the request.
     */
    private boolean unableToConnectToServer(IOException e) {
      if (e instanceof CallQueueTooBigException) {
        LOG.warn("request to {} failed due to {}, try={}, this is usually because the"
          + " server is overloaded, give up", serverName, e, numberOfAttemptsSoFar);
        return true;
      }
      if (isSaslError(e)) {
        LOG.warn("{} is not reachable; give up after first attempt", serverName, e);
        return true;
      }
      return false;
    }

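    /**
     * Walks the cause chain of the given exception looking for a {@link SaslException} or a
     * "relogin is in progress" message, both of which indicate an authentication problem while
     * setting up the connection.
     */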
    private boolean isSaslError(IOException e) {
      Throwable cause = e;
      while (true) {
        if (cause instanceof IOException) {
          IOException unwrappedCause = unwrapException((IOException) cause);
          if (
            unwrappedCause instanceof SaslException
              || (unwrappedCause.getMessage() != null && unwrappedCause.getMessage()
                .contains(RpcConnectionConstants.RELOGIN_IS_IN_PROGRESS))
          ) {
            return true;
          }
        }
        cause = cause.getCause();
        if (cause == null) {
          return false;
        }
      }
    }

    /**
     * Returns true if the error or its cause indicates a network connection issue.
     * @param e IOException thrown by the underlying rpc framework.
     * @return True if the error or its cause indicates a network connection issue.
     */
    private boolean isNetworkError(IOException e) {
      if (
        e instanceof ConnectionClosedException || e instanceof UnknownHostException
          || e instanceof ConnectException
      ) {
        return true;
      }
      Throwable cause = e;
      while (true) {
        if (cause instanceof IOException) {
          IOException unwrappedCause = unwrapException((IOException) cause);
          if (
            unwrappedCause instanceof ConnectionClosedException
              || unwrappedCause instanceof UnknownHostException
              || unwrappedCause instanceof ConnectException
          ) {
            return true;
          }
        }
        cause = cause.getCause();
        if (cause == null) {
          return false;
        }
      }
    }

    /**
     * Returns true if the error type can allow fail-fast.
     * @param e IOException thrown by the underlying rpc framework.
     * @return True if the error type can allow fail-fast.
     */
    private boolean isErrorTypeFailFast(IOException e) {
      return e instanceof CallQueueTooBigException || isSaslError(e) || isNetworkError(e);
    }

    private long getMaxWaitTime() {
      if (this.maxWaitTime < 0) {
        // Compute the deadline lazily: wait at most rsStartupWaitTime from the first
        // ServerNotRunningYetException for the server to come up.
        this.maxWaitTime = EnvironmentEdgeManager.currentTime() + rsStartupWaitTime;
      }
      return this.maxWaitTime;
    }

    private IOException unwrapException(IOException e) {
      if (e instanceof RemoteException) {
        e = ((RemoteException) e).unwrapRemoteException();
      }
      return e;
    }

    @Override
    public void run() {
      request = ExecuteProceduresRequest.newBuilder();
      if (LOG.isTraceEnabled()) {
        LOG.trace("Building request with operations count=" + remoteProcedures.size());
      }
      splitAndResolveOperation(getServerName(), remoteProcedures, this);

      try {
        sendRequest(getServerName(), request.build());
      } catch (IOException e) {
        e = unwrapException(e);
        // TODO: In the future some operation may want to bail out early.
        // TODO: How many times should we retry (use numberOfAttemptsSoFar)
        if (!scheduleForRetry(e)) {
          remoteCallFailed(procedureEnv, e);
        }
      }
    }

    @Override
    public void dispatchOpenRequests(final MasterProcedureEnv env,
      final List<RegionOpenOperation> operations) {
      request.addOpenRegion(buildOpenRegionRequest(env, getServerName(), operations));
    }

    @Override
    public void dispatchCloseRequests(final MasterProcedureEnv env,
      final List<RegionCloseOperation> operations) {
      for (RegionCloseOperation op : operations) {
        request.addCloseRegion(op.buildCloseRegionRequest(getServerName()));
      }
    }

    @Override
    public void dispatchServerOperations(MasterProcedureEnv env, List<ServerOperation> operations) {
      operations.stream().map(ServerOperation::buildRequest).forEachOrdered(request::addProc);
    }

    // will be overridden in test.
    protected ExecuteProceduresResponse sendRequest(final ServerName serverName,
      final ExecuteProceduresRequest request) throws IOException {
      return FutureUtils.get(getRsAdmin().executeProcedures(request));
    }

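    /** Fails every remote procedure in this batch with the given exception. */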
    protected final void remoteCallFailed(final MasterProcedureEnv env, final IOException e) {
      for (RemoteProcedure proc : remoteProcedures) {
        proc.remoteCallFailed(env, getServerName(), e);
      }
    }
  }

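  /**
   * Builds a single {@link OpenRegionRequest} carrying all pending region open operations destined
   * for the given server, stamped with the server start code and the master's current time.
   */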
  private static OpenRegionRequest buildOpenRegionRequest(final MasterProcedureEnv env,
    final ServerName serverName, final List<RegionOpenOperation> operations) {
    final OpenRegionRequest.Builder builder = OpenRegionRequest.newBuilder();
    builder.setServerStartCode(serverName.getStartCode());
    operations.stream().map(RemoteOperation::getInitiatingMasterActiveTime).findAny()
      .ifPresent(builder::setInitiatingMasterActiveTime);
    builder.setMasterSystemTime(EnvironmentEdgeManager.currentTime());
    for (RegionOpenOperation op : operations) {
      builder.addOpenInfo(op.buildRegionOpenInfoRequest(env));
    }
    return builder.build();
  }

  // ==========================================================================
  // RPC Messages
  // - ServerOperation: refreshConfig, grant, revoke, ... (TODO)
  // - RegionOperation: open, close, flush, snapshot, ...
  // ==========================================================================

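  /**
   * Remote operation that asks a region server to execute an arbitrary procedure, identified by
   * the procedure class name and an opaque serialized payload.
   */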
  public static final class ServerOperation extends RemoteOperation {

    private final long procId;

    private final Class<?> rsProcClass;

    private final byte[] rsProcData;

    public ServerOperation(RemoteProcedure remoteProcedure, long procId, Class<?> rsProcClass,
      byte[] rsProcData, long initiatingMasterActiveTime) {
      super(remoteProcedure, initiatingMasterActiveTime);
      this.procId = procId;
      this.rsProcClass = rsProcClass;
      this.rsProcData = rsProcData;
    }

    public RemoteProcedureRequest buildRequest() {
      return RemoteProcedureRequest.newBuilder().setProcId(procId)
        .setProcClass(rsProcClass.getName()).setProcData(ByteString.copyFrom(rsProcData))
        .setInitiatingMasterActiveTime(getInitiatingMasterActiveTime()).build();
    }
  }

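  /** Base class for remote operations that act on a single region. */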
  public static abstract class RegionOperation extends RemoteOperation {
    protected final RegionInfo regionInfo;
    protected final long procId;

    protected RegionOperation(RemoteProcedure remoteProcedure, RegionInfo regionInfo, long procId,
      long initiatingMasterActiveTime) {
      super(remoteProcedure, initiatingMasterActiveTime);
      this.regionInfo = regionInfo;
      this.procId = procId;
    }
  }

  public static class RegionOpenOperation extends RegionOperation {

    public RegionOpenOperation(RemoteProcedure remoteProcedure, RegionInfo regionInfo, long procId,
      long initiatingMasterActiveTime) {
      super(remoteProcedure, regionInfo, procId, initiatingMasterActiveTime);
    }

    public OpenRegionRequest.RegionOpenInfo
      buildRegionOpenInfoRequest(final MasterProcedureEnv env) {
      return RequestConverter.buildRegionOpenInfo(regionInfo,
        env.getAssignmentManager().getFavoredNodes(regionInfo), procId);
    }
  }

  public static class RegionCloseOperation extends RegionOperation {
    private final ServerName destinationServer;
    private boolean evictCache;

    public RegionCloseOperation(RemoteProcedure remoteProcedure, RegionInfo regionInfo, long procId,
      ServerName destinationServer, boolean evictCache, long initiatingMasterActiveTime) {
      super(remoteProcedure, regionInfo, procId, initiatingMasterActiveTime);
      this.destinationServer = destinationServer;
      this.evictCache = evictCache;
    }

    public ServerName getDestinationServer() {
      return destinationServer;
    }

    public CloseRegionRequest buildCloseRegionRequest(final ServerName serverName) {
      return ProtobufUtil.buildCloseRegionRequest(serverName, regionInfo.getRegionName(),
        getDestinationServer(), procId, evictCache, getInitiatingMasterActiveTime());
    }
  }
}