/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.master.procedure;

import java.io.IOException;
import java.lang.Thread.UncaughtExceptionHandler;
import java.net.UnknownHostException;
import java.util.List;
import java.util.Set;
import java.util.concurrent.TimeUnit;
import javax.security.sasl.SaslException;
import org.apache.hadoop.hbase.CallQueueTooBigException;
import org.apache.hadoop.hbase.DoNotRetryIOException;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.client.AsyncRegionServerAdmin;
import org.apache.hadoop.hbase.client.RegionInfo;
import org.apache.hadoop.hbase.exceptions.ConnectionClosedException;
import org.apache.hadoop.hbase.ipc.RpcConnectionConstants;
import org.apache.hadoop.hbase.ipc.ServerNotRunningYetException;
import org.apache.hadoop.hbase.master.MasterServices;
import org.apache.hadoop.hbase.master.ServerListener;
import org.apache.hadoop.hbase.master.ServerManager;
import org.apache.hadoop.hbase.procedure2.ProcedureExecutor;
import org.apache.hadoop.hbase.procedure2.RemoteProcedureDispatcher;
import org.apache.hadoop.hbase.regionserver.RegionServerStoppedException;
import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
import org.apache.hadoop.hbase.util.FutureUtils;
import org.apache.hadoop.ipc.RemoteException;
import org.apache.yetus.audience.InterfaceAudience;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import org.apache.hbase.thirdparty.com.google.common.collect.ArrayListMultimap;
import org.apache.hbase.thirdparty.com.google.protobuf.ByteString;

import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
import org.apache.hadoop.hbase.shaded.protobuf.RequestConverter;
import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ExecuteProceduresRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ExecuteProceduresResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.OpenRegionRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.RemoteProcedureRequest;

/**
 * A remote procedure dispatcher for regionservers.
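 * <p>
 * Pending operations for a given regionserver are grouped into a single
 * {@link ExecuteProceduresRequest} (region opens, region closes and generic server procedures)
 * and sent via {@link AsyncRegionServerAdmin}. Failed calls are retried with an increasing
 * backoff, and a server that keeps failing with connectivity-type errors is eventually expired
 * through {@link ServerManager#expireServer(ServerName)} so that a server crash procedure (SCP)
 * can take over.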
 */
@InterfaceAudience.Private
public class RSProcedureDispatcher extends RemoteProcedureDispatcher<MasterProcedureEnv, ServerName>
  implements ServerListener {
  private static final Logger LOG = LoggerFactory.getLogger(RSProcedureDispatcher.class);

  public static final String RS_RPC_STARTUP_WAIT_TIME_CONF_KEY =
    "hbase.regionserver.rpc.startup.waittime";
  private static final int DEFAULT_RS_RPC_STARTUP_WAIT_TIME = 60000;

  protected final MasterServices master;
  private final long rsStartupWaitTime;
  private MasterProcedureEnv procedureEnv;

  public RSProcedureDispatcher(final MasterServices master) {
    super(master.getConfiguration());

    this.master = master;
    this.rsStartupWaitTime = master.getConfiguration().getLong(RS_RPC_STARTUP_WAIT_TIME_CONF_KEY,
      DEFAULT_RS_RPC_STARTUP_WAIT_TIME);
  }

  @Override
  protected UncaughtExceptionHandler getUncaughtExceptionHandler() {
    return new UncaughtExceptionHandler() {

      @Override
      public void uncaughtException(Thread t, Throwable e) {
        LOG.error("Unexpected error caught, this may cause the procedure to hang forever", e);
      }
    };
  }

  @Override
  public boolean start() {
    if (!super.start()) {
      return false;
    }
    setTimeoutExecutorUncaughtExceptionHandler(this::abort);
    if (master.isStopped()) {
      LOG.debug("Stopped");
      return false;
    }
    // Around startup, if failed, some of the below may be set back to null so NPE is possible.
    ServerManager sm = master.getServerManager();
    if (sm == null) {
      LOG.debug("ServerManager is null");
      return false;
    }
    sm.registerListener(this);
    ProcedureExecutor<MasterProcedureEnv> pe = master.getMasterProcedureExecutor();
    if (pe == null) {
      LOG.debug("ProcedureExecutor is null");
      return false;
    }
    this.procedureEnv = pe.getEnvironment();
    if (this.procedureEnv == null) {
      LOG.debug("ProcedureEnv is null; stopping={}", master.isStopping());
      return false;
    }
    try {
      for (ServerName serverName : sm.getOnlineServersList()) {
        addNode(serverName);
      }
    } catch (Exception e) {
      LOG.info("Failed start", e);
      return false;
    }
    return true;
  }

  private void abort(Thread t, Throwable e) {
    LOG.error("Caught error", e);
    if (!master.isStopped() && !master.isStopping() && !master.isAborted()) {
      master.abort("Aborting master", e);
    }
  }

  @Override
  public boolean stop() {
    if (!super.stop()) {
      return false;
    }

    master.getServerManager().unregisterListener(this);
    return true;
  }

  @Override
  protected void remoteDispatch(final ServerName serverName,
    final Set<RemoteProcedure> remoteProcedures) {
    if (!master.getServerManager().isServerOnline(serverName)) {
      // fail fast
      submitTask(new DeadRSRemoteCall(serverName, remoteProcedures));
    } else {
      submitTask(new ExecuteProceduresRemoteCall(serverName, remoteProcedures));
    }
  }

  @Override
  protected void abortPendingOperations(final ServerName serverName,
    final Set<RemoteProcedure> operations) {
    // TODO: Replace with a ServerNotOnlineException()
    final IOException e = new DoNotRetryIOException("server not online " + serverName);
    for (RemoteProcedure proc : operations) {
      proc.remoteCallFailed(procedureEnv, serverName, e);
    }
  }

  @Override
  public void serverAdded(final ServerName serverName) {
    addNode(serverName);
  }

  @Override
  public void serverRemoved(final ServerName serverName) {
    removeNode(serverName);
  }

  private interface RemoteProcedureResolver {
    void dispatchOpenRequests(MasterProcedureEnv env, List<RegionOpenOperation> operations);

    void dispatchCloseRequests(MasterProcedureEnv env, List<RegionCloseOperation> operations);

    void dispatchServerOperations(MasterProcedureEnv env, List<ServerOperation> operations);
  }

  /**
   * Fetches {@link org.apache.hadoop.hbase.procedure2.RemoteProcedureDispatcher.RemoteOperation}s
   * from the given {@code remoteProcedures} and groups them by class of the returned operation.
   * Then {@code resolver} is used to dispatch {@link RegionOpenOperation}s and
   * {@link RegionCloseOperation}s.
   * @param serverName RegionServer to which the remote operations are sent
   * @param operations Remote procedures which are dispatched to the given server
   * @param resolver   Used to dispatch remote procedures to the given server.
   */
  public void splitAndResolveOperation(ServerName serverName, Set<RemoteProcedure> operations,
    RemoteProcedureResolver resolver) {
    MasterProcedureEnv env = master.getMasterProcedureExecutor().getEnvironment();
    ArrayListMultimap<Class<?>, RemoteOperation> reqsByType =
      buildAndGroupRequestByType(env, serverName, operations);

    List<RegionOpenOperation> openOps = fetchType(reqsByType, RegionOpenOperation.class);
    if (!openOps.isEmpty()) {
      resolver.dispatchOpenRequests(env, openOps);
    }

    List<RegionCloseOperation> closeOps = fetchType(reqsByType, RegionCloseOperation.class);
    if (!closeOps.isEmpty()) {
      resolver.dispatchCloseRequests(env, closeOps);
    }

    List<ServerOperation> refreshOps = fetchType(reqsByType, ServerOperation.class);
    if (!refreshOps.isEmpty()) {
      resolver.dispatchServerOperations(env, refreshOps);
    }

    if (!reqsByType.isEmpty()) {
      LOG.warn("unknown request type in the queue: " + reqsByType);
    }
  }

  private class DeadRSRemoteCall extends ExecuteProceduresRemoteCall {

    public DeadRSRemoteCall(ServerName serverName, Set<RemoteProcedure> remoteProcedures) {
      super(serverName, remoteProcedures);
    }

    @Override
    public void run() {
      remoteCallFailed(procedureEnv,
        new RegionServerStoppedException("Server " + getServerName() + " is not online"));
    }
  }

  // ==========================================================================
  //  Compatibility calls
  // ==========================================================================
  protected class ExecuteProceduresRemoteCall implements RemoteProcedureResolver, Runnable {

    private final ServerName serverName;

    private final Set<RemoteProcedure> remoteProcedures;

    private int numberOfAttemptsSoFar = 0;
    private long maxWaitTime = -1;

    private final long rsRpcRetryInterval;
    private static final String RS_RPC_RETRY_INTERVAL_CONF_KEY =
      "hbase.regionserver.rpc.retry.interval";
    private static final int DEFAULT_RS_RPC_RETRY_INTERVAL = 100;

    /**
     * Config to determine the retry limit while executing remote regionserver procedures. This
     * retry limit applies only to specific errors. These errors could potentially get the remote
     * procedure stuck for several minutes unless the retry limit is applied.
     */
    private static final String RS_REMOTE_PROC_FAIL_FAST_LIMIT =
      "hbase.master.rs.remote.proc.fail.fast.limit";
    /**
     * The default retry limit.
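     * For example, with this default limit of 5 attempts and the default retry interval of 100
     * ms, a server that keeps failing with network errors is retried after quadratic backoffs of
     * roughly 100, 400, 900 and 1600 ms and is then expired, i.e. after about 3 seconds in total.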
     * Waiting for more than {@value} attempts is not going to help much for genuine connectivity
     * errors. Therefore, consider fail-fast after {@value} retries. Value = {@value}
     */
    private static final int DEFAULT_RS_REMOTE_PROC_RETRY_LIMIT = 5;

    private final int failFastRetryLimit;

    private ExecuteProceduresRequest.Builder request = null;

    public ExecuteProceduresRemoteCall(final ServerName serverName,
      final Set<RemoteProcedure> remoteProcedures) {
      this.serverName = serverName;
      this.remoteProcedures = remoteProcedures;
      this.rsRpcRetryInterval = master.getConfiguration().getLong(RS_RPC_RETRY_INTERVAL_CONF_KEY,
        DEFAULT_RS_RPC_RETRY_INTERVAL);
      this.failFastRetryLimit = master.getConfiguration().getInt(RS_REMOTE_PROC_FAIL_FAST_LIMIT,
        DEFAULT_RS_REMOTE_PROC_RETRY_LIMIT);
    }

    private AsyncRegionServerAdmin getRsAdmin() throws IOException {
      return master.getAsyncClusterConnection().getRegionServerAdmin(serverName);
    }

    protected final ServerName getServerName() {
      return serverName;
    }

    private boolean scheduleForRetry(IOException e) {
      LOG.debug("Request to {} failed, try={}", serverName, numberOfAttemptsSoFar, e);
      // Should we wait a little before retrying? If the server is starting, yes.
      if (e instanceof ServerNotRunningYetException) {
        long remainingTime = getMaxWaitTime() - EnvironmentEdgeManager.currentTime();
        if (remainingTime > 0) {
          LOG.warn("Waiting a little before retrying {}, try={}, can wait up to {}ms", serverName,
            numberOfAttemptsSoFar, remainingTime);
          numberOfAttemptsSoFar++;
          // Retry every rsRpcRetryInterval millis up to maximum wait time.
          submitTask(this, rsRpcRetryInterval, TimeUnit.MILLISECONDS);
          return true;
        }
        LOG.warn("{} is throwing ServerNotRunningYetException for {}ms; trying another server",
          serverName, getMaxWaitTime());
        return false;
      }
      if (e instanceof DoNotRetryIOException) {
        LOG.warn("{} tells us DoNotRetry due to {}, try={}, give up", serverName, e.toString(),
          numberOfAttemptsSoFar);
        return false;
      }
      // This category of exceptions is thrown in the rpc framework, where we can make sure
      // that the call has not been executed yet, so it is safe to mark it as failed.
      // Especially when opening a region, we'd better choose another region server.
      // Notice that it is only safe to quit if this is the first time we send a request to the
      // region server. The region server may have accepted our request the first time, and then
      // a network error may have prevented us from receiving the response; if we hit this
      // category of exceptions on a later attempt, it is obviously not safe to quit here,
      // otherwise it may lead to a double assign...
      if (numberOfAttemptsSoFar == 0 && unableToConnectToServer(e)) {
        return false;
      }

      // Check if the number of attempts has crossed the retry limit, and if the error type can
      // fail-fast.
      if (numberOfAttemptsSoFar >= failFastRetryLimit - 1 && isErrorTypeFailFast(e)) {
        LOG.warn(
          "Number of retries {} exceeded limit {} for the given error type. Scheduling server"
            + " crash for {}",
          numberOfAttemptsSoFar + 1, failFastRetryLimit, serverName, e);
        // Expiring the server will schedule SCP and also reject the regionserver report from the
        // regionserver if the regionserver is somehow still able to send it to the master.
        // The master rejects the report by throwing YouAreDeadException, which eventually
        // results in the regionserver aborting.
        // This will also remove "serverName" from the ServerManager's onlineServers map.
        master.getServerManager().expireServer(serverName);
        return false;
      }
      // Always retry for other exception types if the region server is not dead yet.
      if (!master.getServerManager().isServerOnline(serverName)) {
        LOG.warn("Request to {} failed due to {}, try={} and the server is not online, give up",
          serverName, e.toString(), numberOfAttemptsSoFar);
        return false;
      }
      if (e instanceof RegionServerStoppedException) {
        // A better way is to return true here to let the upper layer quit, and then schedule a
        // background task to check whether the region server is dead. And if it is dead, call
        // remoteCallFailed to tell the upper layer. Keeping retrying here does not lead to an
        // incorrect result, but wastes some resources.
        LOG.warn("{} is aborted or stopped, for safety we still need to"
          + " wait until it is fully dead, try={}", serverName, numberOfAttemptsSoFar);
      } else {
        LOG.warn("request to {} failed due to {}, try={}, retrying... , request params: {}",
          serverName, e.toString(), numberOfAttemptsSoFar, request.build());
      }
      numberOfAttemptsSoFar++;
      // Add some backoff here as the attempts rise, otherwise a stuck condition would fill the
      // logs with failed attempts. None of our backoff classes -- RetryCounter or
      // ClientBackoffPolicy -- fit here nicely, so just do something simple: increment by
      // rsRpcRetryInterval millis * retry^2 on each try, up to a max of 10 seconds (don't want
      // to back off too much in case the situation changes).
      submitTask(this,
        Math.min(
          rsRpcRetryInterval * ((long) this.numberOfAttemptsSoFar * this.numberOfAttemptsSoFar),
          10 * 1000),
        TimeUnit.MILLISECONDS);
      return true;
    }

    /**
     * The category of exceptions where we can ensure that the request has not yet been received
     * or processed by the target regionserver, and hence it is safe to choose a different
     * regionserver as the target.
     * @param e IOException thrown by the underlying rpc framework.
     * @return true if the exception belongs to the category where the regionserver has not yet
     *         received the request.
     */
    private boolean unableToConnectToServer(IOException e) {
      if (e instanceof CallQueueTooBigException) {
        LOG.warn("request to {} failed due to {}, try={}, this usually because"
          + " server is overloaded, give up", serverName, e, numberOfAttemptsSoFar);
        return true;
      }
      if (isSaslError(e)) {
        LOG.warn("{} is not reachable; give up after first attempt", serverName, e);
        return true;
      }
      return false;
    }
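
    /**
     * Returns true if the exception, or any of its causes, is a SASL failure: either an actual
     * {@link SaslException} or a relogin-in-progress error from connection setup. Such a server
     * is treated as unreachable, so we give up after the first attempt.
     */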
    private boolean isSaslError(IOException e) {
      Throwable cause = e;
      while (true) {
        if (cause instanceof IOException) {
          IOException unwrappedCause = unwrapException((IOException) cause);
          if (
            unwrappedCause instanceof SaslException
              || (unwrappedCause.getMessage() != null && unwrappedCause.getMessage()
                .contains(RpcConnectionConstants.RELOGIN_IS_IN_PROGRESS))
          ) {
            return true;
          }
        }
        cause = cause.getCause();
        if (cause == null) {
          return false;
        }
      }
    }

    /**
     * Returns true if the error or its cause indicates a network connection issue.
     * @param e IOException thrown by the underlying rpc framework.
     * @return True if the error or its cause indicates a network connection issue.
     */
    private boolean isNetworkError(IOException e) {
      if (e instanceof ConnectionClosedException || e instanceof UnknownHostException) {
        return true;
      }
      Throwable cause = e;
      while (true) {
        if (cause instanceof IOException) {
          IOException unwrappedCause = unwrapException((IOException) cause);
          if (
            unwrappedCause instanceof ConnectionClosedException
              || unwrappedCause instanceof UnknownHostException
          ) {
            return true;
          }
        }
        cause = cause.getCause();
        if (cause == null) {
          return false;
        }
      }
    }

    /**
     * Returns true if the error type can allow fail-fast.
     * @param e IOException thrown by the underlying rpc framework.
     * @return True if the error type can allow fail-fast.
     */
    private boolean isErrorTypeFailFast(IOException e) {
      return e instanceof CallQueueTooBigException || isSaslError(e) || isNetworkError(e);
    }

    private long getMaxWaitTime() {
      if (this.maxWaitTime < 0) {
        // This is the max attempts, not retries, so it should be at least 1.
        this.maxWaitTime = EnvironmentEdgeManager.currentTime() + rsStartupWaitTime;
      }
      return this.maxWaitTime;
    }

    private IOException unwrapException(IOException e) {
      if (e instanceof RemoteException) {
        e = ((RemoteException) e).unwrapRemoteException();
      }
      return e;
    }

    @Override
    public void run() {
      request = ExecuteProceduresRequest.newBuilder();
      if (LOG.isTraceEnabled()) {
        LOG.trace("Building request with operations count=" + remoteProcedures.size());
      }
      splitAndResolveOperation(getServerName(), remoteProcedures, this);

      try {
        sendRequest(getServerName(), request.build());
      } catch (IOException e) {
        e = unwrapException(e);
        // TODO: In the future some operation may want to bail out early.
        // TODO: How many times should we retry (use numberOfAttemptsSoFar)
        if (!scheduleForRetry(e)) {
          remoteCallFailed(procedureEnv, e);
        }
      }
    }

    @Override
    public void dispatchOpenRequests(final MasterProcedureEnv env,
      final List<RegionOpenOperation> operations) {
      request.addOpenRegion(buildOpenRegionRequest(env, getServerName(), operations));
    }

    @Override
    public void dispatchCloseRequests(final MasterProcedureEnv env,
      final List<RegionCloseOperation> operations) {
      for (RegionCloseOperation op : operations) {
        request.addCloseRegion(op.buildCloseRegionRequest(getServerName()));
      }
    }

    @Override
    public void dispatchServerOperations(MasterProcedureEnv env, List<ServerOperation> operations) {
      operations.stream().map(ServerOperation::buildRequest).forEachOrdered(request::addProc);
    }
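
    /**
     * Sends the assembled {@link ExecuteProceduresRequest} to the target regionserver and returns
     * its response. Kept protected so it can be overridden in tests.
     */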
    protected ExecuteProceduresResponse sendRequest(final ServerName serverName,
      final ExecuteProceduresRequest request) throws IOException {
      return FutureUtils.get(getRsAdmin().executeProcedures(request));
    }

    protected final void remoteCallFailed(final MasterProcedureEnv env, final IOException e) {
      for (RemoteProcedure proc : remoteProcedures) {
        proc.remoteCallFailed(env, getServerName(), e);
      }
    }
  }

  private static OpenRegionRequest buildOpenRegionRequest(final MasterProcedureEnv env,
    final ServerName serverName, final List<RegionOpenOperation> operations) {
    final OpenRegionRequest.Builder builder = OpenRegionRequest.newBuilder();
    builder.setServerStartCode(serverName.getStartCode());
    operations.stream().map(RemoteOperation::getInitiatingMasterActiveTime).findAny()
      .ifPresent(builder::setInitiatingMasterActiveTime);
    builder.setMasterSystemTime(EnvironmentEdgeManager.currentTime());
    for (RegionOpenOperation op : operations) {
      builder.addOpenInfo(op.buildRegionOpenInfoRequest(env));
    }
    return builder.build();
  }

  // ==========================================================================
  //  RPC Messages
  //  - ServerOperation: refreshConfig, grant, revoke, ... (TODO)
  //  - RegionOperation: open, close, flush, snapshot, ...
  // ==========================================================================

  public static final class ServerOperation extends RemoteOperation {

    private final long procId;

    private final Class<?> rsProcClass;

    private final byte[] rsProcData;

    public ServerOperation(RemoteProcedure remoteProcedure, long procId, Class<?> rsProcClass,
      byte[] rsProcData, long initiatingMasterActiveTime) {
      super(remoteProcedure, initiatingMasterActiveTime);
      this.procId = procId;
      this.rsProcClass = rsProcClass;
      this.rsProcData = rsProcData;
    }

    public RemoteProcedureRequest buildRequest() {
      return RemoteProcedureRequest.newBuilder().setProcId(procId)
        .setProcClass(rsProcClass.getName()).setProcData(ByteString.copyFrom(rsProcData))
        .setInitiatingMasterActiveTime(getInitiatingMasterActiveTime()).build();
    }
  }

  public static abstract class RegionOperation extends RemoteOperation {
    protected final RegionInfo regionInfo;
    protected final long procId;

    protected RegionOperation(RemoteProcedure remoteProcedure, RegionInfo regionInfo, long procId,
      long initiatingMasterActiveTime) {
      super(remoteProcedure, initiatingMasterActiveTime);
      this.regionInfo = regionInfo;
      this.procId = procId;
    }
  }

  public static class RegionOpenOperation extends RegionOperation {

    public RegionOpenOperation(RemoteProcedure remoteProcedure, RegionInfo regionInfo, long procId,
      long initiatingMasterActiveTime) {
      super(remoteProcedure, regionInfo, procId, initiatingMasterActiveTime);
    }

    public OpenRegionRequest.RegionOpenInfo
      buildRegionOpenInfoRequest(final MasterProcedureEnv env) {
      return RequestConverter.buildRegionOpenInfo(regionInfo,
        env.getAssignmentManager().getFavoredNodes(regionInfo), procId);
    }
  }
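
  /**
   * A close-region operation. Carries the destination server, typically set when the region is
   * being moved, and whether the regionserver should evict the region's cached blocks on close.
   */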
  public static class RegionCloseOperation extends RegionOperation {
    private final ServerName destinationServer;
    private boolean evictCache;

    public RegionCloseOperation(RemoteProcedure remoteProcedure, RegionInfo regionInfo, long procId,
      ServerName destinationServer, boolean evictCache, long initiatingMasterActiveTime) {
      super(remoteProcedure, regionInfo, procId, initiatingMasterActiveTime);
      this.destinationServer = destinationServer;
      this.evictCache = evictCache;
    }

    public ServerName getDestinationServer() {
      return destinationServer;
    }

    public CloseRegionRequest buildCloseRegionRequest(final ServerName serverName) {
      return ProtobufUtil.buildCloseRegionRequest(serverName, regionInfo.getRegionName(),
        getDestinationServer(), procId, evictCache, getInitiatingMasterActiveTime());
    }
  }
}