/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.master;

import java.io.IOException;
import java.net.InetSocketAddress;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Map.Entry;
import java.util.Optional;
import java.util.Random;
import java.util.TreeMap;
import java.util.concurrent.ConcurrentSkipListMap;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.hbase.Abortable;
import org.apache.hadoop.hbase.CellScannable;
import org.apache.hadoop.hbase.CellUtil;
import org.apache.hadoop.hbase.ChoreService;
import org.apache.hadoop.hbase.CoordinatedStateManager;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.TableDescriptors;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.ZooKeeperConnectionException;
import org.apache.hadoop.hbase.client.ClusterConnection;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.RegionInfo;
import org.apache.hadoop.hbase.client.RegionInfoBuilder;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.client.locking.EntityLock;
import org.apache.hadoop.hbase.executor.ExecutorService;
import org.apache.hadoop.hbase.io.hfile.BlockCache;
import org.apache.hadoop.hbase.ipc.HBaseRpcController;
import org.apache.hadoop.hbase.ipc.RpcServerInterface;
import org.apache.hadoop.hbase.mob.MobFileCache;
import org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager;
import org.apache.hadoop.hbase.quotas.RegionServerSpaceQuotaManager;
import org.apache.hadoop.hbase.quotas.RegionSizeStore;
import org.apache.hadoop.hbase.regionserver.FlushRequester;
import org.apache.hadoop.hbase.regionserver.HRegion;
import org.apache.hadoop.hbase.regionserver.HeapMemoryManager;
import org.apache.hadoop.hbase.regionserver.LeaseManager;
import org.apache.hadoop.hbase.regionserver.MetricsRegionServer;
import org.apache.hadoop.hbase.regionserver.Region;
import org.apache.hadoop.hbase.regionserver.RegionServerAccounting;
import org.apache.hadoop.hbase.regionserver.RegionServerServices;
import org.apache.hadoop.hbase.regionserver.SecureBulkLoadManager;
import org.apache.hadoop.hbase.regionserver.ServerNonceManager;
import org.apache.hadoop.hbase.regionserver.compactions.CompactionRequester;
import org.apache.hadoop.hbase.regionserver.throttle.ThroughputController;
import org.apache.hadoop.hbase.security.access.AccessChecker;
import org.apache.hadoop.hbase.security.access.ZKPermissionWatcher;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.wal.WAL;
import org.apache.hadoop.hbase.zookeeper.ZKWatcher;

import org.apache.hbase.thirdparty.com.google.protobuf.RpcController;
import org.apache.hbase.thirdparty.com.google.protobuf.ServiceException;

import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos;
import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ClearCompactionQueuesRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ClearCompactionQueuesResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ClearRegionBlockCacheRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ClearRegionBlockCacheResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ClearSlowLogResponseRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ClearSlowLogResponses;
import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CompactRegionRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CompactRegionResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CompactionSwitchRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CompactionSwitchResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ExecuteProceduresRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ExecuteProceduresResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.FlushRegionRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.FlushRegionResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetOnlineRegionRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetOnlineRegionResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionInfoRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionInfoResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionLoadRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionLoadResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetServerInfoRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetServerInfoResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetStoreFileRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetStoreFileResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.OpenRegionRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.OpenRegionResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ReplicateWALEntryRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ReplicateWALEntryResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.RollWALWriterRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.RollWALWriterResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.SlowLogResponseRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.SlowLogResponses;
import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.StopServerRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.StopServerResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.UpdateConfigurationRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.UpdateConfigurationResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.UpdateFavoredNodesRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.UpdateFavoredNodesResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.WarmupRegionRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.WarmupRegionResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos;
import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.BulkLoadHFileRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.BulkLoadHFileResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.CleanupBulkLoadRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.CleanupBulkLoadResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.CoprocessorServiceRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.CoprocessorServiceResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.GetRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.GetResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.MultiRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.MutateRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.MutateResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.PrepareBulkLoadRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.PrepareBulkLoadResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.ScanRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.ScanResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.GetSpaceQuotaSnapshotsRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.GetSpaceQuotaSnapshotsResponse;

/**
 * A mock RegionServer implementation.
 * Use this when you can't bend Mockito to your liking (e.g. return a null result
 * when 'scanning' until the master times out and then return a coherent meta row
 * result thereafter). It has some facility for faking gets and scans. See
 * {@link #setGetResult(byte[], byte[], Result)} for how to fill the backing data
 * store that the get pulls from.
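 * <p>
 * A minimal usage sketch; {@code conf}, {@code regionName}, {@code row} and {@code result}
 * below are hypothetical test fixtures:
 * <pre>{@code
 * MockRegionServer rs = new MockRegionServer(conf, ServerName.valueOf("mock,1,1"));
 * rs.setGetResult(regionName, row, result);               // backs later get() calls
 * rs.setNextResults(regionName, new Result[] { result }); // backs later scan() calls
 * }</pre>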
 */
class MockRegionServer implements AdminProtos.AdminService.BlockingInterface,
    ClientProtos.ClientService.BlockingInterface, RegionServerServices {
  private final ServerName sn;
  private final ZKWatcher zkw;
  private final Configuration conf;
  private final Random random = new Random();

  /**
   * Map of regions to map of rows and {@link Result}. Used as a data source when
   * {@link #get(RpcController, ClientProtos.GetRequest)} is called. Because the keys are byte
   * arrays, we need to use a TreeMap with an explicit Comparator. Use
   * {@link #setGetResult(byte[], byte[], Result)} to fill this map.
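   * <p>
   * The inner maps are built the same way; a plain {@code HashMap} would not work here
   * because {@code byte[]} uses identity-based equals/hashCode, for example:
   * <pre>{@code
   * // Keys are compared by content thanks to Bytes.BYTES_COMPARATOR.
   * Map<byte[], Result> rows = new TreeMap<>(Bytes.BYTES_COMPARATOR);
   * }</pre>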
   */
  private final Map<byte [], Map<byte [], Result>> gets = new TreeMap<>(Bytes.BYTES_COMPARATOR);

  /**
   * Map of regions to results to return when scanning.
   */
  private final Map<byte [], Result []> nexts = new TreeMap<>(Bytes.BYTES_COMPARATOR);

  /**
   * Data structure that holds a region name and the index used while scanning.
   */
  class RegionNameAndIndex {
    private final byte[] regionName;
    private int index = 0;

    RegionNameAndIndex(final byte[] regionName) {
      this.regionName = regionName;
    }

    byte[] getRegionName() {
      return this.regionName;
    }

    int getThenIncrement() {
      int currentIndex = this.index;
      this.index++;
      return currentIndex;
    }
  }

  /**
   * Outstanding scanners and their offset into <code>nexts</code>.
   */
  private final Map<Long, RegionNameAndIndex> scannersAndOffsets = new HashMap<>();

  /**
   * @param conf Configuration to use
   * @param sn Name of this mock regionserver
   * @throws IOException if the ZooKeeper watcher cannot be created
   * @throws ZooKeeperConnectionException if connecting to ZooKeeper fails
   */
  MockRegionServer(final Configuration conf, final ServerName sn)
      throws ZooKeeperConnectionException, IOException {
    this.sn = sn;
    this.conf = conf;
    this.zkw = new ZKWatcher(conf, sn.toString(), this, true);
  }

  /**
   * Use this method to fill the backing data source used by
   * {@link #get(RpcController, ClientProtos.GetRequest)}.
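   * <p>
   * For example ({@code mockRs}, {@code regionName}, {@code row} and {@code result} are
   * hypothetical test fixtures):
   * <pre>{@code
   * mockRs.setGetResult(regionName, row, result);
   * // A subsequent get RPC against regionName asking for row now returns 'result'.
   * }</pre>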
   * @param regionName the region name to assign
   * @param row the row key
   * @param r the single row result
   */
  void setGetResult(final byte [] regionName, final byte [] row, final Result r) {
    Map<byte [], Result> value = this.gets.get(regionName);
    if (value == null) {
      // If there is no value already, create one. Needs to be a TreeMap because we are
      // using a byte array as the key. Not thread safe.
      value = new TreeMap<>(Bytes.BYTES_COMPARATOR);
      this.gets.put(regionName, value);
    }
    value.put(row, r);
  }

  /**
   * Use this method to set the results a scanner over the given region returns on successive
   * calls to next.
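   * <p>
   * A rough sketch of how this pairs with the scanner methods ({@code mockRs}, {@code r1} and
   * {@code r2} are hypothetical):
   * <pre>{@code
   * mockRs.setNextResults(regionName, new Result[] { r1, r2 });
   * long id = mockRs.openScanner(regionName, null);
   * mockRs.next(id); // r1
   * mockRs.next(id); // r2
   * mockRs.next(id); // null, results exhausted
   * }</pre>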
   * @param regionName the region name to assign
   * @param rs the results the scanner should return, in order
   */
  void setNextResults(final byte [] regionName, final Result [] rs) {
    this.nexts.put(regionName, rs);
  }

  @Override
  public boolean isStopped() {
    return false;
  }

  @Override
  public void abort(String why, Throwable e) {
    throw new RuntimeException(this.sn + ": " + why, e);
  }

  @Override
  public boolean isAborted() {
    return false;
  }

  public long openScanner(byte[] regionName, Scan scan) throws IOException {
    long scannerId = this.random.nextLong();
    this.scannersAndOffsets.put(scannerId, new RegionNameAndIndex(regionName));
    return scannerId;
  }

  public Result next(long scannerId) throws IOException {
    RegionNameAndIndex rnai = this.scannersAndOffsets.get(scannerId);
    int index = rnai.getThenIncrement();
    Result [] results = this.nexts.get(rnai.getRegionName());
    if (results == null) {
      return null;
    }
    return index < results.length ? results[index] : null;
  }

  public Result [] next(long scannerId, int numberOfRows) throws IOException {
    // Just return one result whatever they ask for.
    Result r = next(scannerId);
    return r == null ? null : new Result [] { r };
  }

  public void close(final long scannerId) throws IOException {
    this.scannersAndOffsets.remove(scannerId);
  }

  @Override
  public void stop(String why) {
    this.zkw.close();
  }

  @Override
  public void addRegion(HRegion r) {
  }

  @Override
  public boolean removeRegion(HRegion r, ServerName destination) {
    return false;
  }

  @Override
  public HRegion getRegion(String encodedRegionName) {
    return null;
  }

  @Override
  public Configuration getConfiguration() {
    return this.conf;
  }

  @Override
  public ZKWatcher getZooKeeper() {
    return this.zkw;
  }

  @Override
  public CoordinatedStateManager getCoordinatedStateManager() {
    return null;
  }

  @Override
  public ClusterConnection getConnection() {
    return null;
  }

  @Override
  public ServerName getServerName() {
    return this.sn;
  }

  @Override
  public boolean isStopping() {
    return false;
  }

  @Override
  public FlushRequester getFlushRequester() {
    return null;
  }

  @Override
  public CompactionRequester getCompactionRequestor() {
    return null;
  }

  @Override
  public RegionServerAccounting getRegionServerAccounting() {
    return null;
  }

  @Override
  public RegionServerRpcQuotaManager getRegionServerRpcQuotaManager() {
    return null;
  }

  @Override
  public void postOpenDeployTasks(PostOpenDeployContext context) throws IOException {
  }

  @Override
  public RpcServerInterface getRpcServer() {
    return null;
  }

  @Override
  public ConcurrentSkipListMap<byte[], Boolean> getRegionsInTransitionInRS() {
    return null;
  }

  @Override
  public FileSystem getFileSystem() {
    return null;
  }

  @Override
  public GetResponse get(RpcController controller, GetRequest request)
      throws ServiceException {
    // Serve the Result registered via setGetResult for this region/row; if nothing was
    // registered for the region, return an empty response.
    byte[] regionName = request.getRegion().getValue().toByteArray();
    Map<byte [], Result> m = this.gets.get(regionName);
    GetResponse.Builder builder = GetResponse.newBuilder();
    if (m != null) {
      byte[] row = request.getGet().getRow().toByteArray();
      builder.setResult(ProtobufUtil.toResult(m.get(row)));
    }
    return builder.build();
  }

  @Override
  public MutateResponse mutate(RpcController controller, MutateRequest request)
      throws ServiceException {
    return null;
  }

  @Override
  public ScanResponse scan(RpcController controller, ScanRequest request)
      throws ServiceException {
    ScanResponse.Builder builder = ScanResponse.newBuilder();
    try {
      if (request.hasScan()) {
        // First call: open a scanner over the requested region.
        byte[] regionName = request.getRegion().getValue().toByteArray();
        builder.setScannerId(openScanner(regionName, null));
        builder.setMoreResults(true);
      } else {
        // Subsequent calls: return the next canned Result, or close the scanner once the
        // results registered via setNextResults are exhausted.
        long scannerId = request.getScannerId();
        Result result = next(scannerId);
        if (result != null) {
          builder.addCellsPerResult(result.size());
          List<CellScannable> results = new ArrayList<>(1);
          results.add(result);
          ((HBaseRpcController) controller).setCellScanner(CellUtil.createCellScanner(results));
          builder.setMoreResults(true);
        } else {
          builder.setMoreResults(false);
          close(scannerId);
        }
      }
    } catch (IOException ie) {
      throw new ServiceException(ie);
    }
    return builder.build();
  }

  @Override
  public BulkLoadHFileResponse bulkLoadHFile(RpcController controller,
      BulkLoadHFileRequest request) throws ServiceException {
    return null;
  }

  @Override
  public ClientProtos.CoprocessorServiceResponse execService(RpcController controller,
      ClientProtos.CoprocessorServiceRequest request) throws ServiceException {
    return null;
  }

  @Override
  public ClientProtos.MultiResponse multi(
      RpcController controller, MultiRequest request) throws ServiceException {
    return null;
  }

  @Override
  public GetRegionInfoResponse getRegionInfo(RpcController controller,
      GetRegionInfoRequest request) throws ServiceException {
    GetRegionInfoResponse.Builder builder = GetRegionInfoResponse.newBuilder();
    builder.setRegionInfo(ProtobufUtil.toRegionInfo(RegionInfoBuilder.FIRST_META_REGIONINFO));
    return builder.build();
  }

  @Override
  public GetRegionLoadResponse getRegionLoad(RpcController controller,
      GetRegionLoadRequest request) throws ServiceException {
    GetRegionLoadResponse.Builder builder = GetRegionLoadResponse.newBuilder();
    return builder.build();
  }

  @Override
  public ClearCompactionQueuesResponse clearCompactionQueues(RpcController controller,
      ClearCompactionQueuesRequest request) throws ServiceException {
    return null;
  }

  @Override
  public GetStoreFileResponse getStoreFile(RpcController controller,
      GetStoreFileRequest request) throws ServiceException {
    return null;
  }

  @Override
  public GetOnlineRegionResponse getOnlineRegion(RpcController controller,
      GetOnlineRegionRequest request) throws ServiceException {
    return null;
  }

  @Override
  public List<Region> getRegions() {
    return null;
  }

  @Override
  public OpenRegionResponse openRegion(RpcController controller,
      OpenRegionRequest request) throws ServiceException {
    return null;
  }

  @Override
  public WarmupRegionResponse warmupRegion(RpcController controller,
      WarmupRegionRequest request) throws ServiceException {
    return null;
  }

  @Override
  public CloseRegionResponse closeRegion(RpcController controller,
      CloseRegionRequest request) throws ServiceException {
    return null;
  }

  @Override
  public FlushRegionResponse flushRegion(RpcController controller,
      FlushRegionRequest request) throws ServiceException {
    return null;
  }

  @Override
  public CompactionSwitchResponse compactionSwitch(RpcController controller,
      CompactionSwitchRequest request) throws ServiceException {
    return null;
  }

  @Override
  public CompactRegionResponse compactRegion(RpcController controller,
      CompactRegionRequest request) throws ServiceException {
    return null;
  }

  @Override
  public ReplicateWALEntryResponse replicateWALEntry(RpcController controller,
      ReplicateWALEntryRequest request) throws ServiceException {
    return null;
  }

  @Override
  public RollWALWriterResponse rollWALWriter(RpcController controller,
      RollWALWriterRequest request) throws ServiceException {
    return null;
  }

  @Override
  public GetServerInfoResponse getServerInfo(RpcController controller,
      GetServerInfoRequest request) throws ServiceException {
    return null;
  }

  @Override
  public StopServerResponse stopServer(RpcController controller,
      StopServerRequest request) throws ServiceException {
    return null;
  }

  @Override
  public List<Region> getRegions(TableName tableName) throws IOException {
    return null;
  }

  @Override
  public LeaseManager getLeaseManager() {
    return null;
  }

  @Override
  public List<WAL> getWALs() throws IOException {
    return Collections.emptyList();
  }

  @Override
  public WAL getWAL(RegionInfo regionInfo) throws IOException {
    return null;
  }

  @Override
  public ExecutorService getExecutorService() {
    return null;
  }

  @Override
  public ChoreService getChoreService() {
    return null;
  }

  @Override
  public void updateRegionFavoredNodesMapping(String encodedRegionName,
      List<org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ServerName> favoredNodes) {
  }

  @Override
  public InetSocketAddress[] getFavoredNodesForRegion(String encodedRegionName) {
    return null;
  }

  @Override
  public ReplicateWALEntryResponse replay(RpcController controller,
      ReplicateWALEntryRequest request) throws ServiceException {
    return null;
  }

  @Override
  public UpdateFavoredNodesResponse updateFavoredNodes(RpcController controller,
      UpdateFavoredNodesRequest request) throws ServiceException {
    return null;
  }

  @Override
  public ServerNonceManager getNonceManager() {
    return null;
  }

  @Override
  public boolean reportRegionStateTransition(RegionStateTransitionContext context) {
    return false;
  }

  @Override
  public boolean registerService(com.google.protobuf.Service service) {
    return false;
  }

  @Override
  public CoprocessorServiceResponse execRegionServerService(RpcController controller,
      CoprocessorServiceRequest request) throws ServiceException {
    return null;
  }

  @Override
  public UpdateConfigurationResponse updateConfiguration(RpcController controller,
      UpdateConfigurationRequest request) throws ServiceException {
    return null;
  }

  @Override
  public ClearRegionBlockCacheResponse clearRegionBlockCache(RpcController controller,
      ClearRegionBlockCacheRequest request) throws ServiceException {
    return null;
  }

  @Override
  public HeapMemoryManager getHeapMemoryManager() {
    return null;
  }

  @Override
  public double getCompactionPressure() {
    return 0;
  }

  @Override
  public ClusterConnection getClusterConnection() {
    return null;
  }

  @Override
  public ThroughputController getFlushThroughputController() {
    return null;
  }

  @Override
  public double getFlushPressure() {
    return 0;
  }

  @Override
  public MetricsRegionServer getMetrics() {
    return null;
  }

  @Override
  public EntityLock regionLock(List<RegionInfo> regionInfos, String description, Abortable abort)
      throws IOException {
    return null;
  }

  @Override
  public PrepareBulkLoadResponse prepareBulkLoad(RpcController controller,
      PrepareBulkLoadRequest request) throws ServiceException {
    return null;
  }

  @Override
  public CleanupBulkLoadResponse cleanupBulkLoad(RpcController controller,
      CleanupBulkLoadRequest request) throws ServiceException {
    return null;
  }

  @Override
  public SecureBulkLoadManager getSecureBulkLoadManager() {
    return null;
  }

  @Override
  public void unassign(byte[] regionName) throws IOException {
  }

  @Override
  public RegionServerSpaceQuotaManager getRegionServerSpaceQuotaManager() {
    return null;
  }

  @Override
  public ExecuteProceduresResponse executeProcedures(RpcController controller,
      ExecuteProceduresRequest request) throws ServiceException {
    return null;
  }

  @Override
  public SlowLogResponses getSlowLogResponses(RpcController controller,
      SlowLogResponseRequest request) throws ServiceException {
    return null;
  }

  @Override
  public SlowLogResponses getLargeLogResponses(RpcController controller,
      SlowLogResponseRequest request) throws ServiceException {
    return null;
  }

  @Override
  public ClearSlowLogResponses clearSlowLogsResponses(RpcController controller,
      ClearSlowLogResponseRequest request) throws ServiceException {
    return null;
  }

  @Override
  public GetSpaceQuotaSnapshotsResponse getSpaceQuotaSnapshots(RpcController controller,
      GetSpaceQuotaSnapshotsRequest request) throws ServiceException {
    return null;
  }

  @Override
  public Connection createConnection(Configuration conf) throws IOException {
    return null;
  }

  @Override
  public boolean isClusterUp() {
    return true;
  }

  @Override
  public TableDescriptors getTableDescriptors() {
    return null;
  }

  @Override
  public Optional<BlockCache> getBlockCache() {
    return Optional.empty();
  }

  @Override
  public Optional<MobFileCache> getMobFileCache() {
    return Optional.empty();
  }

  @Override
  public AccessChecker getAccessChecker() {
    return null;
  }

  @Override
  public ZKPermissionWatcher getZKPermissionWatcher() {
    return null;
  }

  @Override
  public boolean reportRegionSizesForQuotas(RegionSizeStore sizeStore) {
    return true;
  }

  @Override
  public boolean reportFileArchivalForQuotas(TableName tableName,
      Collection<Entry<String, Long>> archivedFiles) {
    return false;
  }
}