/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.client;

import static java.util.stream.Collectors.toList;
import static org.apache.hadoop.hbase.HConstants.EMPTY_END_ROW;
import static org.apache.hadoop.hbase.HConstants.EMPTY_START_ROW;
import static org.apache.hadoop.hbase.util.FutureUtils.addListener;

import java.io.IOException;
import java.lang.reflect.UndeclaredThrowableException;
import java.net.InetAddress;
import java.net.UnknownHostException;
import java.util.Arrays;
import java.util.List;
import java.util.Optional;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.ThreadLocalRandom;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicReference;
import java.util.function.Function;
import java.util.function.Predicate;
import java.util.function.Supplier;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.CellComparator;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HRegionLocation;
import org.apache.hadoop.hbase.PrivateCellUtil;
import org.apache.hadoop.hbase.RegionLocations;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.metrics.ScanMetrics;
import org.apache.hadoop.hbase.ipc.HBaseRpcController;
import org.apache.hadoop.hbase.ipc.ServerRpcController;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.ReflectionUtils;
import org.apache.hadoop.ipc.RemoteException;
import org.apache.hadoop.net.DNS;
import org.apache.yetus.audience.InterfaceAudience;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import org.apache.hbase.thirdparty.com.google.common.base.Preconditions;
import org.apache.hbase.thirdparty.com.google.protobuf.RpcCallback;
import org.apache.hbase.thirdparty.com.google.protobuf.RpcController;
import org.apache.hbase.thirdparty.com.google.protobuf.ServiceException;
import org.apache.hbase.thirdparty.io.netty.util.Timer;

import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
import org.apache.hadoop.hbase.shaded.protobuf.ResponseConverter;
import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos;
import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.ClientService;
import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.ScanResponse;

/**
 * Utility used by client connections.
 */
@InterfaceAudience.Private
public final class ConnectionUtils {

  private static final Logger LOG = LoggerFactory.getLogger(ConnectionUtils.class);

  /**
   * Configuration key whose value is the fully qualified name of the class to instantiate when
   * creating a new Connection instance.
   */
  public static final String HBASE_CLIENT_CONNECTION_IMPL = "hbase.client.connection.impl";

  private ConnectionUtils() {
  }

  /**
   * Calculate pause time. Built on {@link HConstants#RETRY_BACKOFF}.
   * @param pause time to pause
   * @param tries number of tries so far
   * @return How long to wait after <code>tries</code> retries
   */
  public static long getPauseTime(final long pause, final int tries) {
    int ntries = tries;
    if (ntries >= HConstants.RETRY_BACKOFF.length) {
      ntries = HConstants.RETRY_BACKOFF.length - 1;
    }
    if (ntries < 0) {
      ntries = 0;
    }

    long normalPause = pause * HConstants.RETRY_BACKOFF[ntries];
    // add up to 1% random jitter
    long jitter = (long) (normalPause * ThreadLocalRandom.current().nextFloat() * 0.01f);
    return normalPause + jitter;
  }
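
  // Illustrative note (assumption: the default HConstants.RETRY_BACKOFF table starts with
  // {1, 2, 3, 5, 10, 20, 40, 100, ...}): a call such as getPauseTime(100, 3) would then return
  // roughly 100 * 5 = 500 ms, plus up to 1% random jitter, i.e. a value in [500, 505) ms.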

  /**
   * Changes the configuration to set the number of retries needed when using Connection
   * internally, e.g. for updating catalog tables, etc. Call this method before we create any
   * Connections.
   * @param c The Configuration instance to set the retries into.
   * @param sn The server name, used only for logging.
   * @param log Used to log what we set in here.
   */
  public static void setServerSideHConnectionRetriesConfig(final Configuration c, final String sn,
      final Logger log) {
    // TODO: Fix this. Not all connections from server side should have 10 times the retries.
    int hcRetries = c.getInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER,
      HConstants.DEFAULT_HBASE_CLIENT_RETRIES_NUMBER);
    // Go big. Multiply by the configured server-side multiplier. If we can't get to meta after
    // this many retries then something is seriously wrong.
    int serversideMultiplier = c.getInt(HConstants.HBASE_CLIENT_SERVERSIDE_RETRIES_MULTIPLIER,
      HConstants.DEFAULT_HBASE_CLIENT_SERVERSIDE_RETRIES_MULTIPLIER);
    int retries = hcRetries * serversideMultiplier;
    c.setInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER, retries);
    log.info(sn + " server-side Connection retries=" + retries);
  }

  /**
   * Return retries + 1. The returned value will be in range [1, Integer.MAX_VALUE].
   */
  static int retries2Attempts(int retries) {
    return Math.max(1, retries == Integer.MAX_VALUE ? Integer.MAX_VALUE : retries + 1);
  }

  /**
   * Get a unique key for the rpc stub to the given server.
   */
  static String getStubKey(String serviceName, ServerName serverName, boolean hostnameCanChange) {
    // Sometimes, servers go down and they come back up with the same hostname but a different
    // IP address. Force a resolution of the hostname by looking up its InetAddress, and this way
    // we will rightfully get a new stubKey.
    // Also, include the hostname in the key so as to take care of those cases where the
    // DNS name is different but IP address remains the same.
    String hostname = serverName.getHostname();
    int port = serverName.getPort();
    if (hostnameCanChange) {
      try {
        InetAddress ip = InetAddress.getByName(hostname);
        return serviceName + "@" + hostname + "-" + ip.getHostAddress() + ":" + port;
      } catch (UnknownHostException e) {
        LOG.warn("Can not resolve " + hostname + ", please check your network", e);
      }
    }
    return serviceName + "@" + hostname + ":" + port;
  }
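
  // Illustrative example (hypothetical values): for serviceName "ClientService" and a server
  // "rs1.example.com" on port 16020, the key is "ClientService@rs1.example.com:16020"; when
  // hostnameCanChange is true and the name resolves to 10.0.0.5, it becomes
  // "ClientService@rs1.example.com-10.0.0.5:16020".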

  static void checkHasFamilies(Mutation mutation) {
    Preconditions.checkArgument(mutation.numFamilies() > 0,
      "Invalid arguments to %s, zero columns specified", mutation.toString());
  }

  /** Dummy nonce generator for disabled nonces. */
  static final NonceGenerator NO_NONCE_GENERATOR = new NonceGenerator() {

    @Override
    public long newNonce() {
      return HConstants.NO_NONCE;
    }

    @Override
    public long getNonceGroup() {
      return HConstants.NO_NONCE;
    }
  };

  // A byte array in which all elements are the max byte, used to construct the closest row
  // before a given row
  static final byte[] MAX_BYTE_ARRAY = Bytes.createMaxByteArray(9);

  /**
   * Create the closest row after the specified row
   */
  static byte[] createClosestRowAfter(byte[] row) {
    return Arrays.copyOf(row, row.length + 1);
  }

  /**
   * Create a row before the specified row and very close to the specified row.
   */
  static byte[] createCloseRowBefore(byte[] row) {
    if (row.length == 0) {
      return MAX_BYTE_ARRAY;
    }
    if (row[row.length - 1] == 0) {
      return Arrays.copyOf(row, row.length - 1);
    } else {
      byte[] nextRow = new byte[row.length + MAX_BYTE_ARRAY.length];
      System.arraycopy(row, 0, nextRow, 0, row.length - 1);
      nextRow[row.length - 1] = (byte) ((row[row.length - 1] & 0xFF) - 1);
      System.arraycopy(MAX_BYTE_ARRAY, 0, nextRow, row.length, MAX_BYTE_ARRAY.length);
      return nextRow;
    }
  }
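
  // Illustrative note (hypothetical rows): createClosestRowAfter("abc") appends a zero byte,
  // giving "abc\x00", the smallest row sorting strictly after "abc". createCloseRowBefore("abc")
  // decrements the last byte and pads with max bytes, giving "abb" followed by nine 0xFF bytes,
  // a row that sorts very close below "abc".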

  static boolean isEmptyStartRow(byte[] row) {
    return Bytes.equals(row, EMPTY_START_ROW);
  }

  static boolean isEmptyStopRow(byte[] row) {
    return Bytes.equals(row, EMPTY_END_ROW);
  }

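  // Reset the given controller for reuse: clear previous state, apply the call timeout (when
  // non-negative, converted from nanoseconds to milliseconds) and the request priority.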
  static void resetController(HBaseRpcController controller, long timeoutNs, int priority) {
    controller.reset();
    if (timeoutNs >= 0) {
      controller.setCallTimeout(
        (int) Math.min(Integer.MAX_VALUE, TimeUnit.NANOSECONDS.toMillis(timeoutNs)));
    }
    controller.setPriority(priority);
  }

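  // Unwrap the underlying exception from the wrappers commonly seen on the RPC path:
  // UndeclaredThrowableException, Hadoop RemoteException and protobuf ServiceException.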
  static Throwable translateException(Throwable t) {
    if (t instanceof UndeclaredThrowableException && t.getCause() != null) {
      t = t.getCause();
    }
    if (t instanceof RemoteException) {
      t = ((RemoteException) t).unwrapRemoteException();
    }
    if (t instanceof ServiceException && t.getCause() != null) {
      t = translateException(t.getCause());
    }
    return t;
  }

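  // Estimate the heap size of a Result by summing the heap size of its cells.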
  static long calcEstimatedSize(Result rs) {
    long estimatedHeapSizeOfResult = 0;
    // Iterate over the raw cells directly instead of creating an Iterator
    for (Cell cell : rs.rawCells()) {
      estimatedHeapSizeOfResult += cell.heapSize();
    }
    return estimatedHeapSizeOfResult;
  }

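  // Drop from the Result every cell that sorts at or before keepCellsAfter (same row only), so
  // that cells which were already returned are not handed back again, e.g. when resuming a scan.
  // Returns null if no cells remain.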
  static Result filterCells(Result result, Cell keepCellsAfter) {
    if (keepCellsAfter == null) {
      // do not need to filter
      return result;
    }
    // not the same row
    if (!PrivateCellUtil.matchingRows(keepCellsAfter, result.getRow(), 0, result.getRow().length)) {
      return result;
    }
    Cell[] rawCells = result.rawCells();
    int index = Arrays.binarySearch(rawCells, keepCellsAfter,
      CellComparator.getInstance()::compareWithoutRow);
    if (index < 0) {
      index = -index - 1;
    } else {
      index++;
    }
    if (index == 0) {
      return result;
    }
    if (index == rawCells.length) {
      return null;
    }
    return Result.create(Arrays.copyOfRange(rawCells, index, rawCells.length), null,
      result.isStale(), result.mayHaveMoreCellsInRow());
  }

  // Add a delta to avoid timing out immediately after a retry sleep.
  static final long SLEEP_DELTA_NS = TimeUnit.MILLISECONDS.toNanos(1);

  static Get toCheckExistenceOnly(Get get) {
    if (get.isCheckExistenceOnly()) {
      return get;
    }
    return ReflectionUtils.newInstance(get.getClass(), get).setCheckExistenceOnly(true);
  }

  static List<Get> toCheckExistenceOnly(List<Get> gets) {
    return gets.stream().map(ConnectionUtils::toCheckExistenceOnly).collect(toList());
  }

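  // Decide how to locate the region for the scan start row: a forward scan needs the region
  // containing the start row (CURRENT), or the one after it (AFTER) when the start row is
  // exclusive; a reversed scan with an empty or exclusive start row needs the region before the
  // start row (BEFORE).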
  static RegionLocateType getLocateType(Scan scan) {
    if (scan.isReversed()) {
      if (isEmptyStartRow(scan.getStartRow())) {
        return RegionLocateType.BEFORE;
      } else {
        return scan.includeStartRow() ? RegionLocateType.CURRENT : RegionLocateType.BEFORE;
      }
    } else {
      return scan.includeStartRow() ? RegionLocateType.CURRENT : RegionLocateType.AFTER;
    }
  }

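  // Return true if a forward scan does not need to continue past the given region, either because
  // the region is the last one of the table or because the scan's stop row falls within it.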
  static boolean noMoreResultsForScan(Scan scan, RegionInfo info) {
    if (isEmptyStopRow(info.getEndKey())) {
      return true;
    }
    if (isEmptyStopRow(scan.getStopRow())) {
      return false;
    }
    int c = Bytes.compareTo(info.getEndKey(), scan.getStopRow());
    // Stop if:
    // 1. the stop row is less than the end key of the region, or
    // 2. the stop row equals the end key of the region and the scan does not include the stop
    // row.
    return c > 0 || (c == 0 && !scan.includeStopRow());
  }

  static boolean noMoreResultsForReverseScan(Scan scan, RegionInfo info) {
    if (isEmptyStartRow(info.getStartKey())) {
      return true;
    }
    if (isEmptyStopRow(scan.getStopRow())) {
      return false;
    }
    // no need to test the inclusiveness of the stop row as the start key of a region is included
    // in the region.
    return Bytes.compareTo(info.getStartKey(), scan.getStopRow()) <= 0;
  }

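  // Combine a list of futures into one future that completes with the results in the same order;
  // if any input future completes exceptionally, the combined future does too.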
  static <T> CompletableFuture<List<T>> allOf(List<CompletableFuture<T>> futures) {
    return CompletableFuture.allOf(futures.toArray(new CompletableFuture[0]))
      .thenApply(v -> futures.stream().map(f -> f.getNow(null)).collect(toList()));
  }

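  // Pick the client-side result cache matching the scan semantics: raw partial results, batched
  // cells, or complete rows.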
  public static ScanResultCache createScanResultCache(Scan scan) {
    if (scan.getAllowPartialResults()) {
      return new AllowPartialScanResultCache();
    } else if (scan.getBatch() > 0) {
      return new BatchScanResultCache(scan.getBatch());
    } else {
      return new CompleteScanResultCache();
    }
  }

  private static final String MY_ADDRESS = getMyAddress();

  private static String getMyAddress() {
    try {
      return DNS.getDefaultHost("default", "default");
    } catch (UnknownHostException uhe) {
      LOG.error("cannot determine my address", uhe);
      return null;
    }
  }

  static boolean isRemote(String host) {
    return !host.equalsIgnoreCase(MY_ADDRESS);
  }

  static void incRPCCallsMetrics(ScanMetrics scanMetrics, boolean isRegionServerRemote) {
    if (scanMetrics == null) {
      return;
    }
    scanMetrics.countOfRPCcalls.incrementAndGet();
    if (isRegionServerRemote) {
      scanMetrics.countOfRemoteRPCcalls.incrementAndGet();
    }
  }

  static void incRPCRetriesMetrics(ScanMetrics scanMetrics, boolean isRegionServerRemote) {
    if (scanMetrics == null) {
      return;
    }
    scanMetrics.countOfRPCRetries.incrementAndGet();
    if (isRegionServerRemote) {
      scanMetrics.countOfRemoteRPCRetries.incrementAndGet();
    }
  }

  static void updateResultsMetrics(ScanMetrics scanMetrics, Result[] rrs,
      boolean isRegionServerRemote) {
    if (scanMetrics == null || rrs == null || rrs.length == 0) {
      return;
    }
    long resultSize = 0;
    for (Result rr : rrs) {
      for (Cell cell : rr.rawCells()) {
        resultSize += PrivateCellUtil.estimatedSerializedSizeOf(cell);
      }
    }
    scanMetrics.countOfBytesInResults.addAndGet(resultSize);
    if (isRegionServerRemote) {
      scanMetrics.countOfBytesInRemoteResults.addAndGet(resultSize);
    }
  }

  /**
   * Use the scan metrics returned by the server to add to the identically named counters in the
   * client side metrics. If a counter does not exist with the same name as the server side metric,
   * the attempt to increase the counter will fail.
   */
  static void updateServerSideMetrics(ScanMetrics scanMetrics, ScanResponse response) {
    if (scanMetrics == null || response == null || !response.hasScanMetrics()) {
      return;
    }
    ResponseConverter.getScanMetrics(response).forEach(scanMetrics::addToCounter);
  }

  static void incRegionCountMetrics(ScanMetrics scanMetrics) {
    if (scanMetrics == null) {
      return;
    }
    scanMetrics.countOfRegions.incrementAndGet();
  }

  /**
   * Connect the two futures: when the src future is done, mark the dst future as done, and when
   * the dst future is done, cancel the src future. This is used for timeline consistent reads.
   * <p/>
   * Pass empty metrics if you want to link the primary future and the dst future, so that we will
   * not increase the hedged read related metrics.
   */
  private static <T> void connect(CompletableFuture<T> srcFuture, CompletableFuture<T> dstFuture,
      Optional<MetricsConnection> metrics) {
    addListener(srcFuture, (r, e) -> {
      if (e != null) {
        dstFuture.completeExceptionally(e);
      } else {
        if (dstFuture.complete(r)) {
          metrics.ifPresent(MetricsConnection::incrHedgedReadWin);
        }
      }
    });
    // The cancellation may be a dummy one, as the dstFuture may have been completed by this
    // srcFuture. Notice that this is a bit tricky, as the execution chain may be 'complete src ->
    // complete dst -> cancel src'; for now it seems to be fine, as CompletableFuture uses CAS to
    // set the result first. If this later causes problems, we could use whenCompleteAsync to
    // break the tie.
    addListener(dstFuture, (r, e) -> srcFuture.cancel(false));
  }

  private static <T> void sendRequestsToSecondaryReplicas(
      Function<Integer, CompletableFuture<T>> requestReplica, RegionLocations locs,
      CompletableFuture<T> future, Optional<MetricsConnection> metrics) {
    if (future.isDone()) {
      // do not send requests to secondary replicas if the future is done, i.e. the primary
      // request has already finished.
      return;
    }
    for (int replicaId = 1, n = locs.size(); replicaId < n; replicaId++) {
      CompletableFuture<T> secondaryFuture = requestReplica.apply(replicaId);
      metrics.ifPresent(MetricsConnection::incrHedgedReadOps);
      connect(secondaryFuture, future, metrics);
    }
  }

  static <T> CompletableFuture<T> timelineConsistentRead(AsyncRegionLocator locator,
      TableName tableName, Query query, byte[] row, RegionLocateType locateType,
      Function<Integer, CompletableFuture<T>> requestReplica, long rpcTimeoutNs,
      long primaryCallTimeoutNs, Timer retryTimer, Optional<MetricsConnection> metrics) {
    if (query.getConsistency() != Consistency.TIMELINE) {
      return requestReplica.apply(RegionReplicaUtil.DEFAULT_REPLICA_ID);
    }
    // The user specified a replica id explicitly, just send the request to that specific replica
    if (query.getReplicaId() >= 0) {
      return requestReplica.apply(query.getReplicaId());
    }
    // Timeline consistent read, where we may send requests to other region replicas
    CompletableFuture<T> primaryFuture = requestReplica.apply(RegionReplicaUtil.DEFAULT_REPLICA_ID);
    CompletableFuture<T> future = new CompletableFuture<>();
    connect(primaryFuture, future, Optional.empty());
    long startNs = System.nanoTime();
    // after the getRegionLocations, all the locations for the replicas of this region should have
    // been cached, so it is not a big deal to locate them again when actually sending requests to
    // these replicas.
    addListener(locator.getRegionLocations(tableName, row, locateType, false, rpcTimeoutNs),
      (locs, error) -> {
        if (error != null) {
          LOG.warn(
            "Failed to locate all the replicas for table={}, row='{}', locateType={}," +
              " give up timeline consistent read",
            tableName, Bytes.toStringBinary(row), locateType, error);
          return;
        }
        if (locs.size() <= 1) {
          LOG.warn(
            "There are no secondary replicas for region {}, give up timeline consistent read",
            locs.getDefaultRegionLocation().getRegion());
          return;
        }
        long delayNs = primaryCallTimeoutNs - (System.nanoTime() - startNs);
        if (delayNs <= 0) {
          sendRequestsToSecondaryReplicas(requestReplica, locs, future, metrics);
        } else {
          retryTimer.newTimeout(
            timeout -> sendRequestsToSecondaryReplicas(requestReplica, locs, future, metrics),
            delayNs, TimeUnit.NANOSECONDS);
        }
      });
    return future;
  }

  // validate for well-formedness
  static void validatePut(Put put, int maxKeyValueSize) throws IllegalArgumentException {
    if (put.isEmpty()) {
      throw new IllegalArgumentException("No columns to insert");
    }
    if (maxKeyValueSize > 0) {
      for (List<Cell> list : put.getFamilyCellMap().values()) {
        for (Cell cell : list) {
          if (cell.getSerializedSize() > maxKeyValueSize) {
            throw new IllegalArgumentException("KeyValue size too large");
          }
        }
      }
    }
  }

  /**
   * Select the priority for the rpc call.
   * <p/>
   * The rules are:
   * <ol>
   * <li>If the user set a priority explicitly, then just use it.</li>
   * <li>For system tables, use {@link HConstants#SYSTEMTABLE_QOS}.</li>
   * <li>For other tables, use {@link HConstants#NORMAL_QOS}.</li>
   * </ol>
   * @param priority the priority set by the user, can be {@link HConstants#PRIORITY_UNSET}.
   * @param tableName the table we operate on
   */
  static int calcPriority(int priority, TableName tableName) {
    if (priority != HConstants.PRIORITY_UNSET) {
      return priority;
    } else {
      return getPriority(tableName);
    }
  }

  static int getPriority(TableName tableName) {
    if (tableName.isSystemTable()) {
      return HConstants.SYSTEMTABLE_QOS;
    } else {
      return HConstants.NORMAL_QOS;
    }
  }

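  // Get a cached value, or fetch it from the registry when the cache is empty or invalid. Uses a
  // CAS on futureRef so that concurrent callers share a single in-flight fetch; when reload is
  // true the cached value is ignored and a fresh fetch is forced (or joined if already running).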
  static <T> CompletableFuture<T> getOrFetch(AtomicReference<T> cacheRef,
      AtomicReference<CompletableFuture<T>> futureRef, boolean reload,
      Supplier<CompletableFuture<T>> fetch, Predicate<T> validator, String type) {
    for (;;) {
      if (!reload) {
        T value = cacheRef.get();
        if (value != null && validator.test(value)) {
          return CompletableFuture.completedFuture(value);
        }
      }
      LOG.trace("{} cache is null, try fetching from registry", type);
      if (futureRef.compareAndSet(null, new CompletableFuture<>())) {
        LOG.debug("Start fetching {} from registry", type);
        CompletableFuture<T> future = futureRef.get();
        addListener(fetch.get(), (value, error) -> {
          if (error != null) {
            LOG.debug("Failed to fetch {} from registry", type, error);
            futureRef.getAndSet(null).completeExceptionally(error);
            return;
          }
          LOG.debug("The fetched {} is {}", type, value);
          // Here we update the cache before resetting the future, so it is possible that someone
          // gets a stale value. Consider this:
          // 1. update cacheRef
          // 2. someone clears the cache and relocates again
          // 3. the futureRef is not null so the old future is used.
          // 4. we clear futureRef and complete the future in it with the value being
          // cleared in step 2.
          // But we do not think it is a big deal as it rarely happens, and even if it happens,
          // the caller will retry again later, so there are no correctness problems.
          cacheRef.set(value);
          futureRef.set(null);
          future.complete(value);
        });
        return future;
      } else {
        CompletableFuture<T> future = futureRef.get();
        if (future != null) {
          return future;
        }
      }
    }
  }

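  // Feed the per-region load statistics of a MultiResponse into the ServerStatisticTracker and/or
  // the connection metrics, when either of them is present.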
  static void updateStats(Optional<ServerStatisticTracker> optStats,
      Optional<MetricsConnection> optMetrics, ServerName serverName, MultiResponse resp) {
    if (!optStats.isPresent() && !optMetrics.isPresent()) {
      // Neither ServerStatisticTracker nor MetricsConnection is present, just return
      return;
    }
    resp.getResults().forEach((regionName, regionResult) -> {
      ClientProtos.RegionLoadStats stat = regionResult.getStat();
      if (stat == null) {
        LOG.error("No ClientProtos.RegionLoadStats found for server={}, region={}", serverName,
          Bytes.toStringBinary(regionName));
        return;
      }
      RegionLoadStats regionLoadStats = ProtobufUtil.createRegionLoadStats(stat);
      optStats.ifPresent(
        stats -> ResultStatsUtil.updateStats(stats, serverName, regionName, regionLoadStats));
      optMetrics.ifPresent(
        metrics -> ResultStatsUtil.updateStats(metrics, serverName, regionName, regionLoadStats));
    });
  }

  @FunctionalInterface
  interface Converter<D, I, S> {
    D convert(I info, S src) throws IOException;
  }

  @FunctionalInterface
  interface RpcCall<RESP, REQ> {
    void call(ClientService.Interface stub, HBaseRpcController controller, REQ req,
        RpcCallback<RESP> done);
  }

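  // Generic helper for a single-region RPC: convert the request, invoke the stub, and convert the
  // response, completing the returned future exceptionally on controller failure or conversion
  // errors.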
  static <REQ, PREQ, PRESP, RESP> CompletableFuture<RESP> call(HBaseRpcController controller,
      HRegionLocation loc, ClientService.Interface stub, REQ req,
      Converter<PREQ, byte[], REQ> reqConvert, RpcCall<PRESP, PREQ> rpcCall,
      Converter<RESP, HBaseRpcController, PRESP> respConverter) {
    CompletableFuture<RESP> future = new CompletableFuture<>();
    try {
      rpcCall.call(stub, controller, reqConvert.convert(loc.getRegion().getRegionName(), req),
        new RpcCallback<PRESP>() {

          @Override
          public void run(PRESP resp) {
            if (controller.failed()) {
              future.completeExceptionally(controller.getFailed());
            } else {
              try {
                future.complete(respConverter.convert(controller, resp));
              } catch (IOException e) {
                future.completeExceptionally(e);
              }
            }
          }
        });
    } catch (IOException e) {
      future.completeExceptionally(e);
    }
    return future;
  }

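  // Shut down the pool, waiting up to 10 seconds for running tasks before forcing shutdownNow;
  // also forces shutdown if interrupted while waiting.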
  static void shutdownPool(ExecutorService pool) {
    pool.shutdown();
    try {
      if (!pool.awaitTermination(10, TimeUnit.SECONDS)) {
        pool.shutdownNow();
      }
    } catch (InterruptedException e) {
      pool.shutdownNow();
    }
  }

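  // Report a coprocessor error through whichever RpcController implementation was supplied:
  // ServerRpcController and ClientCoprocessorRpcController get the Throwable itself, any other
  // controller just gets the error message.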
  static void setCoprocessorError(RpcController controller, Throwable error) {
    if (controller == null) {
      return;
    }
    if (controller instanceof ServerRpcController) {
      if (error instanceof IOException) {
        ((ServerRpcController) controller).setFailedOn((IOException) error);
      } else {
        ((ServerRpcController) controller).setFailedOn(new IOException(error));
      }
    } else if (controller instanceof ClientCoprocessorRpcController) {
      ((ClientCoprocessorRpcController) controller).setFailed(error);
    } else {
      controller.setFailed(error.toString());
    }
  }
}