/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.client;

import static java.util.stream.Collectors.toList;
import static org.apache.hadoop.hbase.HConstants.EMPTY_END_ROW;
import static org.apache.hadoop.hbase.HConstants.EMPTY_START_ROW;
import static org.apache.hadoop.hbase.util.FutureUtils.addListener;

import java.io.IOException;
import java.lang.reflect.UndeclaredThrowableException;
import java.net.InetAddress;
import java.net.UnknownHostException;
import java.util.Arrays;
import java.util.List;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.ThreadLocalRandom;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicReference;
import java.util.function.Function;
import java.util.function.Predicate;
import java.util.function.Supplier;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.CellComparator;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.PrivateCellUtil;
import org.apache.hadoop.hbase.RegionLocations;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.metrics.ScanMetrics;
import org.apache.hadoop.hbase.ipc.HBaseRpcController;
import org.apache.hadoop.hbase.security.User;
import org.apache.hadoop.hbase.security.UserProvider;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.ReflectionUtils;
import org.apache.hadoop.ipc.RemoteException;
import org.apache.hadoop.net.DNS;
import org.apache.yetus.audience.InterfaceAudience;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting;
import org.apache.hbase.thirdparty.com.google.common.base.Preconditions;
import org.apache.hbase.thirdparty.com.google.protobuf.ServiceException;
import org.apache.hbase.thirdparty.io.netty.util.Timer;

import org.apache.hadoop.hbase.shaded.protobuf.ResponseConverter;
import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.AdminService;
import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.ClientService;
import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.ScanResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MasterService;

/**
 * Utility used by client connections.
 */
@InterfaceAudience.Private
public final class ConnectionUtils {

  private static final Logger LOG = LoggerFactory.getLogger(ConnectionUtils.class);

  private ConnectionUtils() {
  }

  /**
   * Calculate pause time. Built on {@link HConstants#RETRY_BACKOFF}.
   * @param pause the base pause time
   * @param tries the number of tries so far
   * @return How long to wait after <code>tries</code> retries
   */
  public static long getPauseTime(final long pause, final int tries) {
    int ntries = tries;
    if (ntries >= HConstants.RETRY_BACKOFF.length) {
      ntries = HConstants.RETRY_BACKOFF.length - 1;
    }
    if (ntries < 0) {
      ntries = 0;
    }

    long normalPause = pause * HConstants.RETRY_BACKOFF[ntries];
    // add up to 1% jitter
    long jitter = (long) (normalPause * ThreadLocalRandom.current().nextFloat() * 0.01f);
    return normalPause + jitter;
  }
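
  // For a rough feel of the backoff progression (a sketch only, assuming the default
  // HConstants.RETRY_BACKOFF table of {1, 2, 3, 5, 10, 20, 40, 100, 100, 100, 100, 200, 200}
  // and a base pause of 100 ms; real values also include up to 1% random jitter):
  //   getPauseTime(100, 0)  -> ~100 ms
  //   getPauseTime(100, 4)  -> ~1000 ms
  //   getPauseTime(100, 12) -> ~20000 ms (and stays capped there for larger tries)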

  /**
   * @param conn The connection for which to replace the generator.
   * @param cnm Replaces the nonce generator used, for testing.
   * @return old nonce generator.
   */
  public static NonceGenerator injectNonceGeneratorForTesting(ClusterConnection conn,
      NonceGenerator cnm) {
    return ConnectionImplementation.injectNonceGeneratorForTesting(conn, cnm);
  }

  /**
   * Changes the configuration to set the number of retries needed when using Connection
   * internally, e.g. for updating catalog tables, etc. Call this method before we create any
   * Connections.
   * @param c The Configuration instance to set the retries into.
   * @param sn The name (usually this server's name) to prefix the log message with.
   * @param log Used to log what we set in here.
   */
  public static void setServerSideHConnectionRetriesConfig(final Configuration c, final String sn,
      final Logger log) {
    // TODO: Fix this. Not all connections from server side should have 10 times the retries.
    int hcRetries = c.getInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER,
      HConstants.DEFAULT_HBASE_CLIENT_RETRIES_NUMBER);
    // Go big. Multiply by the server-side multiplier (10 by default). If we can't get to meta
    // after this many retries then something is seriously wrong.
    int serversideMultiplier = c.getInt(HConstants.HBASE_CLIENT_SERVERSIDE_RETRIES_MULTIPLIER,
      HConstants.DEFAULT_HBASE_CLIENT_SERVERSIDE_RETRIES_MULTIPLIER);
    int retries = hcRetries * serversideMultiplier;
    c.setInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER, retries);
    log.info(sn + " server-side Connection retries=" + retries);
  }

  /**
   * A ClusterConnection that short-circuits RPCs by making direct invocations against the local
   * server when the invocation target is 'this' server, saving on network and protobuf overhead.
   */
  // TODO This has to still do PB marshalling/unmarshalling stuff. Check how/whether we can avoid.
  @VisibleForTesting // Class is visible so we can assert we are short-circuiting when expected.
  public static class ShortCircuitingClusterConnection extends ConnectionImplementation {
    private final ServerName serverName;
    private final AdminService.BlockingInterface localHostAdmin;
    private final ClientService.BlockingInterface localHostClient;

    private ShortCircuitingClusterConnection(Configuration conf, ExecutorService pool, User user,
        ServerName serverName, AdminService.BlockingInterface admin,
        ClientService.BlockingInterface client) throws IOException {
      super(conf, pool, user);
      this.serverName = serverName;
      this.localHostAdmin = admin;
      this.localHostClient = client;
    }

    @Override
    public AdminService.BlockingInterface getAdmin(ServerName sn) throws IOException {
      return serverName.equals(sn) ? this.localHostAdmin : super.getAdmin(sn);
    }

    @Override
    public ClientService.BlockingInterface getClient(ServerName sn) throws IOException {
      return serverName.equals(sn) ? this.localHostClient : super.getClient(sn);
    }

    @Override
    public MasterKeepAliveConnection getMaster() throws IOException {
      if (this.localHostClient instanceof MasterService.BlockingInterface) {
        return new ShortCircuitMasterConnection(
          (MasterService.BlockingInterface) this.localHostClient);
      }
      return super.getMaster();
    }
  }

  /**
   * Creates a short-circuit connection that can bypass the RPC layer (serialization,
   * deserialization, networking, etc.) when talking to a local server.
   * @param conf the current configuration
   * @param pool the thread pool to use for batch operations
   * @param user the user the connection is for
   * @param serverName the local server name
   * @param admin the admin interface of the local server
   * @param client the client interface of the local server
   * @return a short-circuit connection.
   * @throws IOException if an IO failure occurred
   */
  public static ClusterConnection createShortCircuitConnection(final Configuration conf,
      ExecutorService pool, User user, final ServerName serverName,
      final AdminService.BlockingInterface admin, final ClientService.BlockingInterface client)
      throws IOException {
    if (user == null) {
      user = UserProvider.instantiate(conf).getCurrent();
    }
    return new ShortCircuitingClusterConnection(conf, pool, user, serverName, admin, client);
  }
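
  // A minimal usage sketch (illustrative only: 'rpcServices' stands for a server-side object that
  // implements both AdminService.BlockingInterface and ClientService.BlockingInterface, and
  // 'pool'/'serverName' are supplied by the caller; they are not defined in this class):
  //
  //   ClusterConnection conn = ConnectionUtils.createShortCircuitConnection(
  //     conf, pool, null /* resolve the current user */, serverName, rpcServices, rpcServices);
  //   // conn.getClient(serverName) now returns the local implementation directly, while calls
  //   // targeting other servers still go over the wire.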

  /**
   * Set up the connection class so that it will not depend on the master being online. Used for
   * testing.
   * @param conf configuration to set
   */
  @VisibleForTesting
  public static void setupMasterlessConnection(Configuration conf) {
    conf.set(ClusterConnection.HBASE_CLIENT_CONNECTION_IMPL, MasterlessConnection.class.getName());
  }

  /**
   * Some tests shut down the master. But table availability is checked via a master RPC that is
   * performed on region re-lookups, so this connection treats all tables as enabled.
   */
  static class MasterlessConnection extends ConnectionImplementation {
    MasterlessConnection(Configuration conf, ExecutorService pool, User user) throws IOException {
      super(conf, pool, user);
    }

    @Override
    public boolean isTableDisabled(TableName tableName) throws IOException {
      // treat all tables as enabled
      return false;
    }
  }

  /**
   * Return retries + 1. The returned value will be in range [1, Integer.MAX_VALUE].
   */
  static int retries2Attempts(int retries) {
    return Math.max(1, retries == Integer.MAX_VALUE ? Integer.MAX_VALUE : retries + 1);
  }

  /**
   * Get a unique key for the rpc stub to the given server.
   */
  static String getStubKey(String serviceName, ServerName serverName, boolean hostnameCanChange) {
    // Sometimes, servers go down and they come back up with the same hostname but a different
    // IP address. Force a resolution of the hostname and include the resolved IP address in the
    // key, so that we rightfully get a new stub key in that case.
    // Also, include the hostname in the key so as to take care of those cases where the
    // DNS name is different but the IP address remains the same.
    String hostname = serverName.getHostname();
    int port = serverName.getPort();
    if (hostnameCanChange) {
      try {
        InetAddress ip = InetAddress.getByName(hostname);
        return serviceName + "@" + hostname + "-" + ip.getHostAddress() + ":" + port;
      } catch (UnknownHostException e) {
        LOG.warn("Can not resolve " + hostname + ", please check your network", e);
      }
    }
    return serviceName + "@" + hostname + ":" + port;
  }
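
  // Illustrative shapes of the keys produced above (host name, IP and port are made up):
  //   hostnameCanChange == false: "ClientService@rs1.example.com:16020"
  //   hostnameCanChange == true:  "ClientService@rs1.example.com-10.0.0.5:16020"
  // If the host name cannot be resolved, the first form is used as a fallback.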

  static void checkHasFamilies(Mutation mutation) {
    Preconditions.checkArgument(mutation.numFamilies() > 0,
      "Invalid arguments to %s, zero columns specified", mutation.toString());
  }

  /** Dummy nonce generator for disabled nonces. */
  static final NonceGenerator NO_NONCE_GENERATOR = new NonceGenerator() {

    @Override
    public long newNonce() {
      return HConstants.NO_NONCE;
    }

    @Override
    public long getNonceGroup() {
      return HConstants.NO_NONCE;
    }
  };

  // A byte array in which all elements are the max byte, used to construct a row that sorts
  // closely before a given row
  static final byte[] MAX_BYTE_ARRAY = Bytes.createMaxByteArray(9);

  /**
   * Create the closest row after the specified row.
   */
  static byte[] createClosestRowAfter(byte[] row) {
    return Arrays.copyOf(row, row.length + 1);
  }

  /**
   * Create a row before the specified row and very close to the specified row.
   */
  static byte[] createCloseRowBefore(byte[] row) {
    if (row.length == 0) {
      return MAX_BYTE_ARRAY;
    }
    if (row[row.length - 1] == 0) {
      return Arrays.copyOf(row, row.length - 1);
    } else {
      byte[] nextRow = new byte[row.length + MAX_BYTE_ARRAY.length];
      System.arraycopy(row, 0, nextRow, 0, row.length - 1);
      nextRow[row.length - 1] = (byte) ((row[row.length - 1] & 0xFF) - 1);
      System.arraycopy(MAX_BYTE_ARRAY, 0, nextRow, row.length, MAX_BYTE_ARRAY.length);
      return nextRow;
    }
  }
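
  // A few worked examples for the two helpers above (byte values in hex):
  //   createClosestRowAfter({0x01, 0x02}) -> {0x01, 0x02, 0x00}     (append a zero byte)
  //   createCloseRowBefore({0x01, 0x02})  -> {0x01, 0x01, 0xff x 9} (decrement the last byte and
  //                                                                  pad with MAX_BYTE_ARRAY)
  //   createCloseRowBefore({0x01, 0x00})  -> {0x01}                 (drop the trailing zero)
  //   createCloseRowBefore({})            -> {0xff x 9}             (MAX_BYTE_ARRAY itself)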

  static boolean isEmptyStartRow(byte[] row) {
    return Bytes.equals(row, EMPTY_START_ROW);
  }

  static boolean isEmptyStopRow(byte[] row) {
    return Bytes.equals(row, EMPTY_END_ROW);
  }

  static void resetController(HBaseRpcController controller, long timeoutNs) {
    controller.reset();
    if (timeoutNs >= 0) {
      controller.setCallTimeout(
        (int) Math.min(Integer.MAX_VALUE, TimeUnit.NANOSECONDS.toMillis(timeoutNs)));
    }
  }

  static Throwable translateException(Throwable t) {
    if (t instanceof UndeclaredThrowableException && t.getCause() != null) {
      t = t.getCause();
    }
    if (t instanceof RemoteException) {
      t = ((RemoteException) t).unwrapRemoteException();
    }
    if (t instanceof ServiceException && t.getCause() != null) {
      t = translateException(t.getCause());
    }
    return t;
  }

  static long calcEstimatedSize(Result rs) {
    long estimatedHeapSizeOfResult = 0;
    // iterate over the raw cell array directly to avoid creating an Iterator
    for (Cell cell : rs.rawCells()) {
      estimatedHeapSizeOfResult += PrivateCellUtil.estimatedSizeOfCell(cell);
    }
    return estimatedHeapSizeOfResult;
  }

  static Result filterCells(Result result, Cell keepCellsAfter) {
    if (keepCellsAfter == null) {
      // do not need to filter
      return result;
    }
    // not the same row, no need to filter
    if (!PrivateCellUtil.matchingRows(keepCellsAfter, result.getRow(), 0, result.getRow().length)) {
      return result;
    }
    Cell[] rawCells = result.rawCells();
    int index = Arrays.binarySearch(rawCells, keepCellsAfter,
      CellComparator.getInstance()::compareWithoutRow);
    if (index < 0) {
      index = -index - 1;
    } else {
      index++;
    }
    if (index == 0) {
      return result;
    }
    if (index == rawCells.length) {
      return null;
    }
    return Result.create(Arrays.copyOfRange(rawCells, index, rawCells.length), null,
      result.isStale(), result.mayHaveMoreCellsInRow());
  }

  // Add a delta to avoid timing out immediately after a retry sleep.
  static final long SLEEP_DELTA_NS = TimeUnit.MILLISECONDS.toNanos(1);

  static Get toCheckExistenceOnly(Get get) {
    if (get.isCheckExistenceOnly()) {
      return get;
    }
    return ReflectionUtils.newInstance(get.getClass(), get).setCheckExistenceOnly(true);
  }

  static List<Get> toCheckExistenceOnly(List<Get> gets) {
    return gets.stream().map(ConnectionUtils::toCheckExistenceOnly).collect(toList());
  }

  static RegionLocateType getLocateType(Scan scan) {
    if (scan.isReversed()) {
      if (isEmptyStartRow(scan.getStartRow())) {
        return RegionLocateType.BEFORE;
      } else {
        return scan.includeStartRow() ? RegionLocateType.CURRENT : RegionLocateType.BEFORE;
      }
    } else {
      return scan.includeStartRow() ? RegionLocateType.CURRENT : RegionLocateType.AFTER;
    }
  }

  static boolean noMoreResultsForScan(Scan scan, RegionInfo info) {
    if (isEmptyStopRow(info.getEndKey())) {
      return true;
    }
    if (isEmptyStopRow(scan.getStopRow())) {
      return false;
    }
    int c = Bytes.compareTo(info.getEndKey(), scan.getStopRow());
    // No more results if:
    // 1. our stop row is less than the end key of the region, or
    // 2. our stop row is equal to the end key of the region and we do not include the stop row
    // in the scan.
    return c > 0 || (c == 0 && !scan.includeStopRow());
  }
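
  // A sketch with string row keys, where region [a, d) has start key "a" and end key "d":
  //   scan stop row "c"                        -> true  (the stop row falls inside this region)
  //   scan stop row "d", includeStopRow=false  -> true
  //   scan stop row "d", includeStopRow=true   -> false (row "d" belongs to the next region)
  //   scan stop row "e"                        -> false (keep scanning in the next region)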

  static boolean noMoreResultsForReverseScan(Scan scan, RegionInfo info) {
    if (isEmptyStartRow(info.getStartKey())) {
      return true;
    }
    if (isEmptyStopRow(scan.getStopRow())) {
      return false;
    }
    // no need to test inclusiveness of the stop row, as the start key of a region is included in
    // the region.
    return Bytes.compareTo(info.getStartKey(), scan.getStopRow()) <= 0;
  }

  static <T> CompletableFuture<List<T>> allOf(List<CompletableFuture<T>> futures) {
    return CompletableFuture.allOf(futures.toArray(new CompletableFuture[0]))
      .thenApply(v -> futures.stream().map(f -> f.getNow(null)).collect(toList()));
  }

  public static ScanResultCache createScanResultCache(Scan scan) {
    if (scan.getAllowPartialResults()) {
      return new AllowPartialScanResultCache();
    } else if (scan.getBatch() > 0) {
      return new BatchScanResultCache(scan.getBatch());
    } else {
      return new CompleteScanResultCache();
    }
  }

  private static final String MY_ADDRESS = getMyAddress();

  private static String getMyAddress() {
    try {
      return DNS.getDefaultHost("default", "default");
    } catch (UnknownHostException uhe) {
      LOG.error("cannot determine my address", uhe);
      return null;
    }
  }

  static boolean isRemote(String host) {
    return !host.equalsIgnoreCase(MY_ADDRESS);
  }

  static void incRPCCallsMetrics(ScanMetrics scanMetrics, boolean isRegionServerRemote) {
    if (scanMetrics == null) {
      return;
    }
    scanMetrics.countOfRPCcalls.incrementAndGet();
    if (isRegionServerRemote) {
      scanMetrics.countOfRemoteRPCcalls.incrementAndGet();
    }
  }

  static void incRPCRetriesMetrics(ScanMetrics scanMetrics, boolean isRegionServerRemote) {
    if (scanMetrics == null) {
      return;
    }
    scanMetrics.countOfRPCRetries.incrementAndGet();
    if (isRegionServerRemote) {
      scanMetrics.countOfRemoteRPCRetries.incrementAndGet();
    }
  }

  static void updateResultsMetrics(ScanMetrics scanMetrics, Result[] rrs,
      boolean isRegionServerRemote) {
    if (scanMetrics == null || rrs == null || rrs.length == 0) {
      return;
    }
    long resultSize = 0;
    for (Result rr : rrs) {
      for (Cell cell : rr.rawCells()) {
        resultSize += PrivateCellUtil.estimatedSerializedSizeOf(cell);
      }
    }
    scanMetrics.countOfBytesInResults.addAndGet(resultSize);
    if (isRegionServerRemote) {
      scanMetrics.countOfBytesInRemoteResults.addAndGet(resultSize);
    }
  }

  /**
   * Use the scan metrics returned by the server to add to the identically named counters in the
   * client side metrics. If a counter does not exist with the same name as the server side metric,
   * the attempt to increase the counter will fail.
   */
  static void updateServerSideMetrics(ScanMetrics scanMetrics, ScanResponse response) {
    if (scanMetrics == null || response == null || !response.hasScanMetrics()) {
      return;
    }
    ResponseConverter.getScanMetrics(response).forEach(scanMetrics::addToCounter);
  }

  static void incRegionCountMetrics(ScanMetrics scanMetrics) {
    if (scanMetrics == null) {
      return;
    }
    scanMetrics.countOfRegions.incrementAndGet();
  }

  /**
   * Connect the two futures: if the src future is done, mark the dst future as done; and if the
   * dst future is done, cancel the src future. This is used for timeline consistent reads.
   */
  private static <T> void connect(CompletableFuture<T> srcFuture, CompletableFuture<T> dstFuture) {
    addListener(srcFuture, (r, e) -> {
      if (e != null) {
        dstFuture.completeExceptionally(e);
      } else {
        dstFuture.complete(r);
      }
    });
    // The cancellation may be a dummy one as the dstFuture may be completed by this srcFuture.
    // Notice that this is a bit tricky, as the execution chain may be 'complete src -> complete
    // dst -> cancel src'. For now this seems to be fine, as CompletableFuture uses CAS to set the
    // result first. If this later causes problems, we could use whenCompleteAsync to break the
    // tie.
    addListener(dstFuture, (r, e) -> srcFuture.cancel(false));
  }

  private static <T> void sendRequestsToSecondaryReplicas(
      Function<Integer, CompletableFuture<T>> requestReplica, RegionLocations locs,
      CompletableFuture<T> future) {
    if (future.isDone()) {
      // do not send requests to secondary replicas if the future is done, i.e., the primary
      // request has already finished.
      return;
    }
    for (int replicaId = 1, n = locs.size(); replicaId < n; replicaId++) {
      CompletableFuture<T> secondaryFuture = requestReplica.apply(replicaId);
      connect(secondaryFuture, future);
    }
  }

  static <T> CompletableFuture<T> timelineConsistentRead(AsyncRegionLocator locator,
      TableName tableName, Query query, byte[] row, RegionLocateType locateType,
      Function<Integer, CompletableFuture<T>> requestReplica, long rpcTimeoutNs,
      long primaryCallTimeoutNs, Timer retryTimer) {
    if (query.getConsistency() == Consistency.STRONG) {
      return requestReplica.apply(RegionReplicaUtil.DEFAULT_REPLICA_ID);
    }
    // the user specified a replica id explicitly, just send the request to that replica
    if (query.getReplicaId() >= 0) {
      return requestReplica.apply(query.getReplicaId());
    }
    // Timeline consistent read, where we may send requests to other region replicas
    CompletableFuture<T> primaryFuture = requestReplica.apply(RegionReplicaUtil.DEFAULT_REPLICA_ID);
    CompletableFuture<T> future = new CompletableFuture<>();
    connect(primaryFuture, future);
    long startNs = System.nanoTime();
    // after the getRegionLocations, all the locations for the replicas of this region should have
    // been cached, so it is not a big deal to locate them again when actually sending requests to
    // these replicas.
    addListener(locator.getRegionLocations(tableName, row, locateType, false, rpcTimeoutNs),
      (locs, error) -> {
        if (error != null) {
          LOG.warn(
            "Failed to locate all the replicas for table={}, row='{}', locateType={}," +
              " give up timeline consistent read",
            tableName, Bytes.toStringBinary(row), locateType, error);
          return;
        }
        if (locs.size() <= 1) {
          LOG.warn(
            "There are no secondary replicas for region {}, give up timeline consistent read",
            locs.getDefaultRegionLocation().getRegion());
          return;
        }
        long delayNs = primaryCallTimeoutNs - (System.nanoTime() - startNs);
        if (delayNs <= 0) {
          sendRequestsToSecondaryReplicas(requestReplica, locs, future);
        } else {
          retryTimer.newTimeout(
            timeout -> sendRequestsToSecondaryReplicas(requestReplica, locs, future), delayNs,
            TimeUnit.NANOSECONDS);
        }
      });
    return future;
  }
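
  // How this surfaces at the user API level (a sketch; 'asyncTable' is an assumed AsyncTable
  // obtained from an AsyncConnection, not something defined here):
  //
  //   Get get = new Get(row).setConsistency(Consistency.TIMELINE);
  //   CompletableFuture<Result> f = asyncTable.get(get);
  //
  // The request goes to the primary replica first; if it has not completed within the primary
  // call timeout, the same request is fanned out to the secondary replicas and the first
  // response from any replica completes the returned future.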

  static <T> CompletableFuture<T> getOrFetch(AtomicReference<T> cacheRef,
      AtomicReference<CompletableFuture<T>> futureRef, boolean reload,
      Supplier<CompletableFuture<T>> fetch, Predicate<T> validator, String type) {
    for (;;) {
      if (!reload) {
        T value = cacheRef.get();
        if (value != null && validator.test(value)) {
          return CompletableFuture.completedFuture(value);
        }
      }
      LOG.trace("{} cache is null, try fetching from registry", type);
      if (futureRef.compareAndSet(null, new CompletableFuture<>())) {
        LOG.debug("Start fetching {} from registry", type);
        CompletableFuture<T> future = futureRef.get();
        addListener(fetch.get(), (value, error) -> {
          if (error != null) {
            LOG.debug("Failed to fetch {} from registry", type, error);
            futureRef.getAndSet(null).completeExceptionally(error);
            return;
          }
          LOG.debug("The fetched {} is {}", type, value);
          // Here we update the cache before resetting the future, so it is possible that someone
          // gets a stale value. Consider this:
          // 1. we update cacheRef
          // 2. someone clears the cache and relocates again
          // 3. the futureRef is not null so the old future is used.
          // 4. we clear futureRef and complete the future in it with the value that was cleared
          // in step 2.
          // But we do not think it is a big deal as it rarely happens, and even if it happens, the
          // caller will retry again later, so there are no correctness problems.
          cacheRef.set(value);
          futureRef.set(null);
          future.complete(value);
        });
        return future;
      } else {
        CompletableFuture<T> future = futureRef.get();
        if (future != null) {
          return future;
        }
      }
    }
  }
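
  // A minimal sketch of the intended call pattern (the local variables and the 'registry' object
  // with its getClusterId() call are illustrative assumptions, not part of this class):
  //
  //   AtomicReference<String> clusterIdCache = new AtomicReference<>();
  //   AtomicReference<CompletableFuture<String>> clusterIdFetch = new AtomicReference<>();
  //   CompletableFuture<String> f = getOrFetch(clusterIdCache, clusterIdFetch, false,
  //     () -> registry.getClusterId(), id -> id != null, "cluster id");
  //
  // Concurrent callers share the single in-flight fetch via futureRef, and later calls are served
  // from cacheRef until the validator rejects the cached value or reload is requested.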
}