/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.client;

import static java.util.stream.Collectors.toList;
import static org.apache.hadoop.hbase.HConstants.EMPTY_END_ROW;
import static org.apache.hadoop.hbase.HConstants.EMPTY_START_ROW;
import static org.apache.hadoop.hbase.util.FutureUtils.addListener;

import java.io.IOException;
import java.lang.reflect.UndeclaredThrowableException;
import java.net.InetAddress;
import java.net.UnknownHostException;
import java.util.Arrays;
import java.util.List;
import java.util.Optional;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.ThreadLocalRandom;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicReference;
import java.util.function.Function;
import java.util.function.Predicate;
import java.util.function.Supplier;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.CellComparator;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HRegionLocation;
import org.apache.hadoop.hbase.PrivateCellUtil;
import org.apache.hadoop.hbase.RegionLocations;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.metrics.ScanMetrics;
import org.apache.hadoop.hbase.ipc.HBaseRpcController;
import org.apache.hadoop.hbase.ipc.ServerRpcController;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.ReflectionUtils;
import org.apache.hadoop.ipc.RemoteException;
import org.apache.hadoop.net.DNS;
import org.apache.yetus.audience.InterfaceAudience;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import org.apache.hbase.thirdparty.com.google.common.base.Preconditions;
import org.apache.hbase.thirdparty.com.google.protobuf.RpcCallback;
import org.apache.hbase.thirdparty.com.google.protobuf.ServiceException;
import org.apache.hbase.thirdparty.io.netty.util.Timer;

import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
import org.apache.hadoop.hbase.shaded.protobuf.ResponseConverter;
import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos;
import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.ClientService;
import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.ScanResponse;

/**
 * Utility used by client connections.
 */
@InterfaceAudience.Private
public final class ConnectionUtils {

  private static final Logger LOG = LoggerFactory.getLogger(ConnectionUtils.class);

  /**
   * Configuration key whose value is the Connection implementation class to use when creating a
   * new Connection instance.
   */
  public static final String HBASE_CLIENT_CONNECTION_IMPL = "hbase.client.connection.impl";

  private ConnectionUtils() {
  }

  /**
   * Calculate pause time. Built on {@link HConstants#RETRY_BACKOFF}.
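   * <p/>
   * A rough sketch of the expected values, assuming the default backoff table and a base pause of
   * 100 ms (a random jitter of at most 1% is added on top):
   *
   * <pre>{@code
   * getPauseTime(100, 0);   // ~100 ms, i.e. 100 * RETRY_BACKOFF[0]
   * getPauseTime(100, 1);   // ~200 ms, i.e. 100 * RETRY_BACKOFF[1]
   * getPauseTime(100, 100); // capped at the last entry of RETRY_BACKOFF
   * }</pre>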
   * @param pause time to pause
   * @param tries number of tries so far
   * @return How long to wait after <code>tries</code> retries
   */
  public static long getPauseTime(final long pause, final int tries) {
    int ntries = tries;
    if (ntries >= HConstants.RETRY_BACKOFF.length) {
      ntries = HConstants.RETRY_BACKOFF.length - 1;
    }
    if (ntries < 0) {
      ntries = 0;
    }

    long normalPause = pause * HConstants.RETRY_BACKOFF[ntries];
    // 1% possible jitter
    long jitter = (long) (normalPause * ThreadLocalRandom.current().nextFloat() * 0.01f);
    return normalPause + jitter;
  }

  /**
   * Changes the configuration to set the number of retries needed when using Connection
   * internally, e.g. for updating catalog tables. Call this method before creating any
   * Connections.
   * @param c The Configuration instance to set the retries into.
   * @param sn The server name, used only for logging.
   * @param log Used to log what we set in here.
   */
  public static void setServerSideHConnectionRetriesConfig(final Configuration c, final String sn,
      final Logger log) {
    // TODO: Fix this. Not all connections from server side should have 10 times the retries.
    int hcRetries = c.getInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER,
      HConstants.DEFAULT_HBASE_CLIENT_RETRIES_NUMBER);
    // Go big. Multiply by 10. If we can't get to meta after this many retries
    // then something is seriously wrong.
    int serversideMultiplier = c.getInt(HConstants.HBASE_CLIENT_SERVERSIDE_RETRIES_MULTIPLIER,
      HConstants.DEFAULT_HBASE_CLIENT_SERVERSIDE_RETRIES_MULTIPLIER);
    int retries = hcRetries * serversideMultiplier;
    c.setInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER, retries);
    log.info(sn + " server-side Connection retries=" + retries);
  }

  /**
   * Return retries + 1. The returned value will be in range [1, Integer.MAX_VALUE].
   */
  static int retries2Attempts(int retries) {
    return Math.max(1, retries == Integer.MAX_VALUE ? Integer.MAX_VALUE : retries + 1);
  }

  /**
   * Get a unique key for the rpc stub to the given server.
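   * <p/>
   * Illustrative key shapes (host names, IPs and ports below are just placeholders):
   *
   * <pre>{@code
   * // hostnameCanChange == false, or the hostname cannot be resolved
   * "ClientService@rs1.example.org:16020"
   * // hostnameCanChange == true and the hostname resolves
   * "ClientService@rs1.example.org-10.0.0.1:16020"
   * }</pre>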
   */
  static String getStubKey(String serviceName, ServerName serverName, boolean hostnameCanChange) {
    // Sometimes, servers go down and they come back up with the same hostname but a different
    // IP address. Force a resolution of the hostname by calling InetAddress.getByName, and this
    // way we will rightfully get a new stubKey.
    // Also, include the hostname in the key so as to take care of those cases where the
    // DNS name is different but the IP address remains the same.
    String hostname = serverName.getHostname();
    int port = serverName.getPort();
    if (hostnameCanChange) {
      try {
        InetAddress ip = InetAddress.getByName(hostname);
        return serviceName + "@" + hostname + "-" + ip.getHostAddress() + ":" + port;
      } catch (UnknownHostException e) {
        LOG.warn("Can not resolve " + hostname + ", please check your network", e);
      }
    }
    return serviceName + "@" + hostname + ":" + port;
  }

  static void checkHasFamilies(Mutation mutation) {
    Preconditions.checkArgument(mutation.numFamilies() > 0,
      "Invalid arguments to %s, zero columns specified", mutation.toString());
  }

  /** Dummy nonce generator for disabled nonces. */
  static final NonceGenerator NO_NONCE_GENERATOR = new NonceGenerator() {

    @Override
    public long newNonce() {
      return HConstants.NO_NONCE;
    }

    @Override
    public long getNonceGroup() {
      return HConstants.NO_NONCE;
    }
  };

  // A byte array in which all elements are the max byte value; it is used to construct the
  // closest row before a given row.
  static final byte[] MAX_BYTE_ARRAY = Bytes.createMaxByteArray(9);

  /**
   * Create the closest row after the specified row.
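   * <p/>
   * The closest row after a row is the row plus a trailing 0x00 byte, for example:
   *
   * <pre>{@code
   * createClosestRowAfter(Bytes.toBytes("a")); // => { 'a', 0x00 }
   * }</pre>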
   */
  static byte[] createClosestRowAfter(byte[] row) {
    return Arrays.copyOf(row, row.length + 1);
  }

  /**
   * Create a row that sorts before the specified row but is as close to it as possible.
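   * <p/>
   * A few illustrative cases, where MAX_BYTE_ARRAY is nine copies of the max byte value:
   *
   * <pre>{@code
   * createCloseRowBefore(new byte[] { 'b', 0x00 }); // => { 'b' }
   * createCloseRowBefore(new byte[] { 'b' });       // => { 'a' } followed by MAX_BYTE_ARRAY
   * createCloseRowBefore(EMPTY_START_ROW);          // => MAX_BYTE_ARRAY
   * }</pre>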
   */
  static byte[] createCloseRowBefore(byte[] row) {
    if (row.length == 0) {
      return MAX_BYTE_ARRAY;
    }
    if (row[row.length - 1] == 0) {
      return Arrays.copyOf(row, row.length - 1);
    } else {
      byte[] nextRow = new byte[row.length + MAX_BYTE_ARRAY.length];
      System.arraycopy(row, 0, nextRow, 0, row.length - 1);
      nextRow[row.length - 1] = (byte) ((row[row.length - 1] & 0xFF) - 1);
      System.arraycopy(MAX_BYTE_ARRAY, 0, nextRow, row.length, MAX_BYTE_ARRAY.length);
      return nextRow;
    }
  }

  static boolean isEmptyStartRow(byte[] row) {
    return Bytes.equals(row, EMPTY_START_ROW);
  }

  static boolean isEmptyStopRow(byte[] row) {
    return Bytes.equals(row, EMPTY_END_ROW);
  }

  static void resetController(HBaseRpcController controller, long timeoutNs, int priority) {
    controller.reset();
    if (timeoutNs >= 0) {
      controller.setCallTimeout(
        (int) Math.min(Integer.MAX_VALUE, TimeUnit.NANOSECONDS.toMillis(timeoutNs)));
    }
    controller.setPriority(priority);
  }

  static Throwable translateException(Throwable t) {
    if (t instanceof UndeclaredThrowableException && t.getCause() != null) {
      t = t.getCause();
    }
    if (t instanceof RemoteException) {
      t = ((RemoteException) t).unwrapRemoteException();
    }
    if (t instanceof ServiceException && t.getCause() != null) {
      t = translateException(t.getCause());
    }
    return t;
  }

  static long calcEstimatedSize(Result rs) {
    long estimatedHeapSizeOfResult = 0;
    // Iterate over rawCells directly so we do not create an Iterator here
    for (Cell cell : rs.rawCells()) {
      estimatedHeapSizeOfResult += cell.heapSize();
    }
    return estimatedHeapSizeOfResult;
  }

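  /**
   * Filter out the cells of {@code result} that sort at or before {@code keepCellsAfter} within
   * the same row, so that cells already returned to the caller are not returned again when a scan
   * resumes in the middle of a row. Returns {@code null} if no cell is left.
   */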
  static Result filterCells(Result result, Cell keepCellsAfter) {
    if (keepCellsAfter == null) {
      // do not need to filter
      return result;
    }
    // not the same row, so nothing needs to be filtered
    if (!PrivateCellUtil.matchingRows(keepCellsAfter, result.getRow(), 0, result.getRow().length)) {
      return result;
    }
    Cell[] rawCells = result.rawCells();
    int index = Arrays.binarySearch(rawCells, keepCellsAfter,
      CellComparator.getInstance()::compareWithoutRow);
    if (index < 0) {
      index = -index - 1;
    } else {
      index++;
    }
    if (index == 0) {
      return result;
    }
    if (index == rawCells.length) {
      return null;
    }
    return Result.create(Arrays.copyOfRange(rawCells, index, rawCells.length), null,
      result.isStale(), result.mayHaveMoreCellsInRow());
  }

  // Add a delta so that we do not time out immediately after sleeping for a retry.
  static final long SLEEP_DELTA_NS = TimeUnit.MILLISECONDS.toNanos(1);

  static Get toCheckExistenceOnly(Get get) {
    if (get.isCheckExistenceOnly()) {
      return get;
    }
    return ReflectionUtils.newInstance(get.getClass(), get).setCheckExistenceOnly(true);
  }

  static List<Get> toCheckExistenceOnly(List<Get> gets) {
    return gets.stream().map(ConnectionUtils::toCheckExistenceOnly).collect(toList());
  }

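  /**
   * Work out how to locate the first region for this scan: the region containing the start row
   * (inclusive start), the region before it (reversed scan with an exclusive or empty start row),
   * or the region after it (forward scan with an exclusive start row).
   */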
  static RegionLocateType getLocateType(Scan scan) {
    if (scan.isReversed()) {
      if (isEmptyStartRow(scan.getStartRow())) {
        return RegionLocateType.BEFORE;
      } else {
        return scan.includeStartRow() ? RegionLocateType.CURRENT : RegionLocateType.BEFORE;
      }
    } else {
      return scan.includeStartRow() ? RegionLocateType.CURRENT : RegionLocateType.AFTER;
    }
  }

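  /**
   * Whether the given region is the last one this (forward) scan needs to read, i.e. the scan's
   * stop row does not go beyond the end key of the region.
   */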
  static boolean noMoreResultsForScan(Scan scan, RegionInfo info) {
    if (isEmptyStopRow(info.getEndKey())) {
      return true;
    }
    if (isEmptyStopRow(scan.getStopRow())) {
      return false;
    }
    int c = Bytes.compareTo(info.getEndKey(), scan.getStopRow());
    // No more results if:
    // 1. the stop row of the scan is less than the end key of the region, or
    // 2. the stop row equals the end key of the region and the scan does not include the stop row.
    return c > 0 || (c == 0 && !scan.includeStopRow());
  }

  static boolean noMoreResultsForReverseScan(Scan scan, RegionInfo info) {
    if (isEmptyStartRow(info.getStartKey())) {
      return true;
    }
    if (isEmptyStopRow(scan.getStopRow())) {
      return false;
    }
    // No need to check whether the stop row is inclusive, as the start key of a region is always
    // included in the region.
    return Bytes.compareTo(info.getStartKey(), scan.getStopRow()) <= 0;
  }

  static <T> CompletableFuture<List<T>> allOf(List<CompletableFuture<T>> futures) {
    return CompletableFuture.allOf(futures.toArray(new CompletableFuture[0]))
      .thenApply(v -> futures.stream().map(f -> f.getNow(null)).collect(toList()));
  }

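  /**
   * Pick the {@link ScanResultCache} implementation that matches how the scan wants partial
   * results handled: raw partial results, results re-grouped into batches of cells, or only whole
   * rows.
   */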
  public static ScanResultCache createScanResultCache(Scan scan) {
    if (scan.getAllowPartialResults()) {
      return new AllowPartialScanResultCache();
    } else if (scan.getBatch() > 0) {
      return new BatchScanResultCache(scan.getBatch());
    } else {
      return new CompleteScanResultCache();
    }
  }

  private static final String MY_ADDRESS = getMyAddress();

  private static String getMyAddress() {
    try {
      return DNS.getDefaultHost("default", "default");
    } catch (UnknownHostException uhe) {
      LOG.error("cannot determine my address", uhe);
      return null;
    }
  }

  static boolean isRemote(String host) {
    return !host.equalsIgnoreCase(MY_ADDRESS);
  }

  static void incRPCCallsMetrics(ScanMetrics scanMetrics, boolean isRegionServerRemote) {
    if (scanMetrics == null) {
      return;
    }
    scanMetrics.countOfRPCcalls.incrementAndGet();
    if (isRegionServerRemote) {
      scanMetrics.countOfRemoteRPCcalls.incrementAndGet();
    }
  }

  static void incRPCRetriesMetrics(ScanMetrics scanMetrics, boolean isRegionServerRemote) {
    if (scanMetrics == null) {
      return;
    }
    scanMetrics.countOfRPCRetries.incrementAndGet();
    if (isRegionServerRemote) {
      scanMetrics.countOfRemoteRPCRetries.incrementAndGet();
    }
  }

  static void updateResultsMetrics(ScanMetrics scanMetrics, Result[] rrs,
      boolean isRegionServerRemote) {
    if (scanMetrics == null || rrs == null || rrs.length == 0) {
      return;
    }
    long resultSize = 0;
    for (Result rr : rrs) {
      for (Cell cell : rr.rawCells()) {
        resultSize += PrivateCellUtil.estimatedSerializedSizeOf(cell);
      }
    }
    scanMetrics.countOfBytesInResults.addAndGet(resultSize);
    if (isRegionServerRemote) {
      scanMetrics.countOfBytesInRemoteResults.addAndGet(resultSize);
    }
  }

  /**
   * Use the scan metrics returned by the server to add to the identically named counters in the
   * client side metrics. If a counter does not exist with the same name as the server side metric,
   * the attempt to increase the counter will fail.
   */
  static void updateServerSideMetrics(ScanMetrics scanMetrics, ScanResponse response) {
    if (scanMetrics == null || response == null || !response.hasScanMetrics()) {
      return;
    }
    ResponseConverter.getScanMetrics(response).forEach(scanMetrics::addToCounter);
  }

  static void incRegionCountMetrics(ScanMetrics scanMetrics) {
    if (scanMetrics == null) {
      return;
    }
    scanMetrics.countOfRegions.incrementAndGet();
  }

  /**
   * Connect the two futures: when the src future is done, mark the dst future as done with the
   * same outcome, and when the dst future is done, cancel the src future. This is used for
   * timeline consistent reads.
   * <p/>
   * Pass empty metrics when linking the primary future to the dst future so that we do not
   * increase the hedged read related metrics.
   */
  private static <T> void connect(CompletableFuture<T> srcFuture, CompletableFuture<T> dstFuture,
      Optional<MetricsConnection> metrics) {
    addListener(srcFuture, (r, e) -> {
      if (e != null) {
        dstFuture.completeExceptionally(e);
      } else {
        if (dstFuture.complete(r)) {
          metrics.ifPresent(MetricsConnection::incrHedgedReadWin);
        }
      }
    });
    // The cancellation may be a dummy one as the dstFuture may be completed by this srcFuture.
    // Notice that this is a bit tricky, as the execution chain may be 'complete src -> complete
    // dst -> cancel src'; for now it seems to be fine, as CompletableFuture uses CAS to set the
    // result first. If later this causes problems, we could use whenCompleteAsync to break the
    // tie.
    addListener(dstFuture, (r, e) -> srcFuture.cancel(false));
  }

  private static <T> void sendRequestsToSecondaryReplicas(
      Function<Integer, CompletableFuture<T>> requestReplica, RegionLocations locs,
      CompletableFuture<T> future, Optional<MetricsConnection> metrics) {
    if (future.isDone()) {
      // do not send requests to secondary replicas if the future is done, i.e., the primary
      // request has already finished.
      return;
    }
    for (int replicaId = 1, n = locs.size(); replicaId < n; replicaId++) {
      CompletableFuture<T> secondaryFuture = requestReplica.apply(replicaId);
      metrics.ifPresent(MetricsConnection::incrHedgedReadOps);
      connect(secondaryFuture, future, metrics);
    }
  }

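  /**
   * Perform a timeline consistent read: send the request to the primary replica first and, if it
   * has not finished within {@code primaryCallTimeoutNs}, also send it to the secondary replicas,
   * completing the returned future with whichever replica answers first. Only applies when the
   * query uses {@link Consistency#TIMELINE} and does not pin a specific replica id.
   */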
  static <T> CompletableFuture<T> timelineConsistentRead(AsyncRegionLocator locator,
      TableName tableName, Query query, byte[] row, RegionLocateType locateType,
      Function<Integer, CompletableFuture<T>> requestReplica, long rpcTimeoutNs,
      long primaryCallTimeoutNs, Timer retryTimer, Optional<MetricsConnection> metrics) {
    if (query.getConsistency() != Consistency.TIMELINE) {
      return requestReplica.apply(RegionReplicaUtil.DEFAULT_REPLICA_ID);
    }
    // The user specified a replica id explicitly, just send the request to that replica
    if (query.getReplicaId() >= 0) {
      return requestReplica.apply(query.getReplicaId());
    }
    // Timeline consistent read, where we may send requests to other region replicas
    CompletableFuture<T> primaryFuture = requestReplica.apply(RegionReplicaUtil.DEFAULT_REPLICA_ID);
    CompletableFuture<T> future = new CompletableFuture<>();
    connect(primaryFuture, future, Optional.empty());
    long startNs = System.nanoTime();
    // after the getRegionLocations, all the locations for the replicas of this region should have
    // been cached, so it is not a big deal to locate them again when actually sending requests to
    // these replicas.
    addListener(locator.getRegionLocations(tableName, row, locateType, false, rpcTimeoutNs),
      (locs, error) -> {
        if (error != null) {
          LOG.warn(
            "Failed to locate all the replicas for table={}, row='{}', locateType={}" +
              ", give up timeline consistent read",
            tableName, Bytes.toStringBinary(row), locateType, error);
          return;
        }
        if (locs.size() <= 1) {
          LOG.warn(
            "There are no secondary replicas for region {}, give up timeline consistent read",
            locs.getDefaultRegionLocation().getRegion());
          return;
        }
        long delayNs = primaryCallTimeoutNs - (System.nanoTime() - startNs);
        if (delayNs <= 0) {
          sendRequestsToSecondaryReplicas(requestReplica, locs, future, metrics);
        } else {
          retryTimer.newTimeout(
            timeout -> sendRequestsToSecondaryReplicas(requestReplica, locs, future, metrics),
            delayNs, TimeUnit.NANOSECONDS);
        }
      });
    return future;
  }

  // validate for well-formedness
  static void validatePut(Put put, int maxKeyValueSize) throws IllegalArgumentException {
    if (put.isEmpty()) {
      throw new IllegalArgumentException("No columns to insert");
    }
    if (maxKeyValueSize > 0) {
      for (List<Cell> list : put.getFamilyCellMap().values()) {
        for (Cell cell : list) {
          if (cell.getSerializedSize() > maxKeyValueSize) {
            throw new IllegalArgumentException("KeyValue size too large");
          }
        }
      }
    }
  }

  /**
   * Select the priority for the rpc call.
   * <p/>
   * The rules are:
   * <ol>
   * <li>If the user set a priority explicitly, then just use it.</li>
   * <li>For system tables, use {@link HConstants#SYSTEMTABLE_QOS}.</li>
   * <li>For other tables, use {@link HConstants#NORMAL_QOS}.</li>
   * </ol>
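   * <p/>
   * For example, {@code calcPriority(HConstants.PRIORITY_UNSET, TableName.META_TABLE_NAME)}
   * resolves to {@link HConstants#SYSTEMTABLE_QOS}, while an explicitly set priority is returned
   * unchanged.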
   * @param priority the priority set by user, can be {@link HConstants#PRIORITY_UNSET}.
   * @param tableName the table we operate on
   */
  static int calcPriority(int priority, TableName tableName) {
    if (priority != HConstants.PRIORITY_UNSET) {
      return priority;
    } else {
      return getPriority(tableName);
    }
  }

  static int getPriority(TableName tableName) {
    if (tableName.isSystemTable()) {
      return HConstants.SYSTEMTABLE_QOS;
    } else {
      return HConstants.NORMAL_QOS;
    }
  }

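  /**
   * Return the cached value if it is present and still valid, otherwise fetch a fresh one via
   * {@code fetch}. Concurrent callers share a single in-flight fetch: only the caller that wins
   * the CAS on {@code futureRef} actually triggers the fetch, everyone else waits on the same
   * future.
   */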
  static <T> CompletableFuture<T> getOrFetch(AtomicReference<T> cacheRef,
      AtomicReference<CompletableFuture<T>> futureRef, boolean reload,
      Supplier<CompletableFuture<T>> fetch, Predicate<T> validator, String type) {
    for (;;) {
      if (!reload) {
        T value = cacheRef.get();
        if (value != null && validator.test(value)) {
          return CompletableFuture.completedFuture(value);
        }
      }
      LOG.trace("{} cache is null, try fetching from registry", type);
      if (futureRef.compareAndSet(null, new CompletableFuture<>())) {
        LOG.debug("Start fetching {} from registry", type);
        CompletableFuture<T> future = futureRef.get();
        addListener(fetch.get(), (value, error) -> {
          if (error != null) {
            LOG.debug("Failed to fetch {} from registry", type, error);
            futureRef.getAndSet(null).completeExceptionally(error);
            return;
          }
          LOG.debug("The fetched {} is {}", type, value);
          // Here we update the cache before resetting the future, so it is possible that someone
          // can get a stale value. Consider this:
          // 1. we update cacheRef
          // 2. someone clears the cache and fetches again
          // 3. the futureRef is not null so the old future is used.
          // 4. we clear futureRef and complete the future in it with the value that was
          // cleared in step 2.
          // But we do not think it is a big deal as it rarely happens, and even if it happens, the
          // caller will retry again later, so there are no correctness problems.
          cacheRef.set(value);
          futureRef.set(null);
          future.complete(value);
        });
        return future;
      } else {
        CompletableFuture<T> future = futureRef.get();
        if (future != null) {
          return future;
        }
      }
    }
  }

  static void updateStats(Optional<ServerStatisticTracker> optStats,
      Optional<MetricsConnection> optMetrics, ServerName serverName, MultiResponse resp) {
    if (!optStats.isPresent() && !optMetrics.isPresent()) {
      // Neither ServerStatisticTracker nor MetricsConnection is present, just return
      return;
    }
    resp.getResults().forEach((regionName, regionResult) -> {
      ClientProtos.RegionLoadStats stat = regionResult.getStat();
      if (stat == null) {
        LOG.error("No ClientProtos.RegionLoadStats found for server={}, region={}", serverName,
          Bytes.toStringBinary(regionName));
        return;
      }
      RegionLoadStats regionLoadStats = ProtobufUtil.createRegionLoadStats(stat);
      optStats.ifPresent(
        stats -> ResultStatsUtil.updateStats(stats, serverName, regionName, regionLoadStats));
      optMetrics.ifPresent(
        metrics -> ResultStatsUtil.updateStats(metrics, serverName, regionName, regionLoadStats));
    });
  }

  @FunctionalInterface
  interface Converter<D, I, S> {
    D convert(I info, S src) throws IOException;
  }

  @FunctionalInterface
  interface RpcCall<RESP, REQ> {
    void call(ClientService.Interface stub, HBaseRpcController controller, REQ req,
        RpcCallback<RESP> done);
  }

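  /**
   * Turn a callback based {@link ClientService.Interface} rpc into a {@link CompletableFuture}:
   * convert the request for the target region, issue the call, then either convert the response
   * or complete the future exceptionally with the failure recorded on the controller.
   */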
  static <REQ, PREQ, PRESP, RESP> CompletableFuture<RESP> call(HBaseRpcController controller,
      HRegionLocation loc, ClientService.Interface stub, REQ req,
      Converter<PREQ, byte[], REQ> reqConvert, RpcCall<PRESP, PREQ> rpcCall,
      Converter<RESP, HBaseRpcController, PRESP> respConverter) {
    CompletableFuture<RESP> future = new CompletableFuture<>();
    try {
      rpcCall.call(stub, controller, reqConvert.convert(loc.getRegion().getRegionName(), req),
        new RpcCallback<PRESP>() {

          @Override
          public void run(PRESP resp) {
            if (controller.failed()) {
              future.completeExceptionally(controller.getFailed());
            } else {
              try {
                future.complete(respConverter.convert(controller, resp));
              } catch (IOException e) {
                future.completeExceptionally(e);
              }
            }
          }
        });
    } catch (IOException e) {
      future.completeExceptionally(e);
    }
    return future;
  }

  static void shutdownPool(ExecutorService pool) {
    pool.shutdown();
    try {
      if (!pool.awaitTermination(10, TimeUnit.SECONDS)) {
        pool.shutdownNow();
      }
    } catch (InterruptedException e) {
      pool.shutdownNow();
    }
  }

  static void setCoprocessorError(com.google.protobuf.RpcController controller, Throwable error) {
    if (controller == null) {
      return;
    }
    if (controller instanceof ServerRpcController) {
      if (error instanceof IOException) {
        ((ServerRpcController) controller).setFailedOn((IOException) error);
      } else {
        ((ServerRpcController) controller).setFailedOn(new IOException(error));
      }
    } else if (controller instanceof ClientCoprocessorRpcController) {
      ((ClientCoprocessorRpcController) controller).setFailed(error);
    } else {
      controller.setFailed(error.toString());
    }
  }
}