/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.master;

import java.io.IOException;
import java.net.InetSocketAddress;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Map.Entry;
import java.util.Optional;
import java.util.TreeMap;
import java.util.concurrent.ConcurrentSkipListMap;
import java.util.concurrent.ThreadLocalRandom;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.hbase.Abortable;
import org.apache.hadoop.hbase.CellScannable;
import org.apache.hadoop.hbase.CellUtil;
import org.apache.hadoop.hbase.ChoreService;
import org.apache.hadoop.hbase.CoordinatedStateManager;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.TableDescriptors;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.ZooKeeperConnectionException;
import org.apache.hadoop.hbase.client.ClusterConnection;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.RegionInfo;
import org.apache.hadoop.hbase.client.RegionInfoBuilder;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.client.locking.EntityLock;
import org.apache.hadoop.hbase.executor.ExecutorService;
import org.apache.hadoop.hbase.io.hfile.BlockCache;
import org.apache.hadoop.hbase.ipc.HBaseRpcController;
import org.apache.hadoop.hbase.ipc.RpcServerInterface;
import org.apache.hadoop.hbase.mob.MobFileCache;
import org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager;
import org.apache.hadoop.hbase.quotas.RegionServerSpaceQuotaManager;
import org.apache.hadoop.hbase.quotas.RegionSizeStore;
import org.apache.hadoop.hbase.regionserver.FlushRequester;
import org.apache.hadoop.hbase.regionserver.HRegion;
import org.apache.hadoop.hbase.regionserver.HeapMemoryManager;
import org.apache.hadoop.hbase.regionserver.LeaseManager;
import org.apache.hadoop.hbase.regionserver.MetricsRegionServer;
import org.apache.hadoop.hbase.regionserver.Region;
import org.apache.hadoop.hbase.regionserver.RegionServerAccounting;
import org.apache.hadoop.hbase.regionserver.RegionServerServices;
import org.apache.hadoop.hbase.regionserver.SecureBulkLoadManager;
import org.apache.hadoop.hbase.regionserver.ServerNonceManager;
import org.apache.hadoop.hbase.regionserver.compactions.CompactionRequester;
import org.apache.hadoop.hbase.regionserver.throttle.ThroughputController;
import org.apache.hadoop.hbase.security.access.AccessChecker;
import org.apache.hadoop.hbase.security.access.ZKPermissionWatcher;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.wal.WAL;
import org.apache.hadoop.hbase.zookeeper.ZKWatcher;

import org.apache.hbase.thirdparty.com.google.protobuf.RpcController;
import org.apache.hbase.thirdparty.com.google.protobuf.ServiceException;

import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos;
import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ClearCompactionQueuesRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ClearCompactionQueuesResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ClearRegionBlockCacheRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ClearRegionBlockCacheResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ClearSlowLogResponseRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ClearSlowLogResponses;
import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CompactRegionRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CompactRegionResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CompactionSwitchRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CompactionSwitchResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ExecuteProceduresRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ExecuteProceduresResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.FlushRegionRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.FlushRegionResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetOnlineRegionRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetOnlineRegionResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionInfoRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionInfoResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionLoadRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionLoadResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetServerInfoRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetServerInfoResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetStoreFileRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetStoreFileResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.OpenRegionRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.OpenRegionResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ReplicateWALEntryRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ReplicateWALEntryResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.RollWALWriterRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.RollWALWriterResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.SlowLogResponseRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.SlowLogResponses;
import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.StopServerRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.StopServerResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.UpdateConfigurationRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.UpdateConfigurationResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.UpdateFavoredNodesRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.UpdateFavoredNodesResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.WarmupRegionRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.WarmupRegionResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos;
import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.BulkLoadHFileRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.BulkLoadHFileResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.CleanupBulkLoadRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.CleanupBulkLoadResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.CoprocessorServiceRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.CoprocessorServiceResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.GetRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.GetResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.MultiRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.MutateRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.MutateResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.PrepareBulkLoadRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.PrepareBulkLoadResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.ScanRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.ScanResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos;
import org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.GetSpaceQuotaSnapshotsRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.GetSpaceQuotaSnapshotsResponse;

/**
 * A mock RegionServer implementation. Use this when you can't bend Mockito to your liking, e.g.
 * when you want to return a null result while 'scanning' until the master times out and then
 * return a coherent meta row result thereafter. Has some facility for faking gets and scans. See
 * {@link #setGetResult(byte[], byte[], Result)} for how to fill the backing data store that the
 * get pulls from.
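 * <p>
 * A minimal usage sketch (illustrative only: the region name, row, and cell values below are made
 * up, and {@code conf} is assumed to be a test {@link Configuration}):
 *
 * <pre>{@code
 * MockRegionServer rs = new MockRegionServer(conf, ServerName.valueOf("mock.example.org,16020,1"));
 * byte[] regionName = Bytes.toBytes("someRegionName");
 * byte[] row = Bytes.toBytes("someRow");
 * Cell cell = new KeyValue(row, Bytes.toBytes("info"), Bytes.toBytes("q"), Bytes.toBytes("v"));
 * Result result = Result.create(new Cell[] { cell });
 * // Back get(...) with a canned Result for this region and row.
 * rs.setGetResult(regionName, row, result);
 * // Back scan(...)/next(...) with a fixed sequence of Results.
 * rs.setNextResults(regionName, new Result[] { result });
 * }</pre>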
 */
class MockRegionServer implements AdminProtos.AdminService.BlockingInterface,
  ClientProtos.ClientService.BlockingInterface, RegionServerServices {
  private final ServerName sn;
  private final ZKWatcher zkw;
  private final Configuration conf;

  /**
   * Map of regions to a map of rows and {@link Result}. Used as the data source when
   * {@link #get(RpcController, ClientProtos.GetRequest)} is called. Because the keys are byte
   * arrays, we need a TreeMap with a byte-array Comparator. Use
   * {@link #setGetResult(byte[], byte[], Result)} to fill this map.
   */
  private final Map<byte[], Map<byte[], Result>> gets = new TreeMap<>(Bytes.BYTES_COMPARATOR);

  /**
   * Map of regions to results to return when scanning.
   */
  private final Map<byte[], Result[]> nexts = new TreeMap<>(Bytes.BYTES_COMPARATOR);

  /**
   * Data structure that holds a region name and the current index used while scanning.
   */
  class RegionNameAndIndex {
    private final byte[] regionName;
    private int index = 0;

    RegionNameAndIndex(final byte[] regionName) {
      this.regionName = regionName;
    }

    byte[] getRegionName() {
      return this.regionName;
    }

    int getThenIncrement() {
      int currentIndex = this.index;
      this.index++;
      return currentIndex;
    }
  }

  /**
   * Outstanding scanners and their offset into <code>nexts</code>
   */
  private final Map<Long, RegionNameAndIndex> scannersAndOffsets = new HashMap<>();

  /**
   * @param conf Configuration to use
   * @param sn   Name of this mock regionserver
   * @throws ZooKeeperConnectionException if connecting to ZooKeeper fails
   * @throws IOException                  if setting up the ZooKeeper watcher fails
   */
  MockRegionServer(final Configuration conf, final ServerName sn)
    throws ZooKeeperConnectionException, IOException {
    this.sn = sn;
    this.conf = conf;
    this.zkw = new ZKWatcher(conf, sn.toString(), this, true);
  }

  /**
   * Use this method to fill the backing data source used by
   * {@link #get(RpcController, ClientProtos.GetRequest)}.
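   * <p>
   * A sketch of how a test might then exercise the fake get (illustrative; assumes a
   * MockRegionServer named {@code rs} and uses HBase's {@code RequestConverter} to build the
   * protobuf request):
   *
   * <pre>{@code
   * byte[] regionName = Bytes.toBytes("someRegionName");
   * byte[] row = Bytes.toBytes("someRow");
   * rs.setGetResult(regionName, row, result);
   * GetRequest request = RequestConverter.buildGetRequest(regionName, new Get(row));
   * GetResponse response = rs.get(null, request); // carries the Result set above
   * }</pre>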
   * @param regionName the region name to assign
   * @param row        the row key
   * @param r          the single row result
   */
  void setGetResult(final byte[] regionName, final byte[] row, final Result r) {
    Map<byte[], Result> value = this.gets.get(regionName);
    if (value == null) {
      // If there is no value already, create one. Needs to be a TreeMap because we are
      // using a byte array as the key. Not thread safe.
      value = new TreeMap<>(Bytes.BYTES_COMPARATOR);
      this.gets.put(regionName, value);
    }
    value.put(row, r);
  }

  /**
   * Use this method to set the results a scanner for the given region will return as we call next
   * through them.
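   * <p>
   * A sketch of the intended use together with the scanner helpers below (illustrative; assumes a
   * MockRegionServer named {@code rs} and a canned array {@code results} with at least two
   * entries):
   *
   * <pre>{@code
   * rs.setNextResults(regionName, results);
   * long scannerId = rs.openScanner(regionName, null);
   * Result first = rs.next(scannerId);  // results[0]
   * Result second = rs.next(scannerId); // results[1]; later calls return null once exhausted
   * rs.close(scannerId);
   * }</pre>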
   */
  void setNextResults(final byte[] regionName, final Result[] rs) {
    this.nexts.put(regionName, rs);
  }

  @Override
  public boolean isStopped() {
    return false;
  }

  @Override
  public void abort(String why, Throwable e) {
    throw new RuntimeException(this.sn + ": " + why, e);
  }

  @Override
  public boolean isAborted() {
    return false;
  }

  public long openScanner(byte[] regionName, Scan scan) throws IOException {
    long scannerId = ThreadLocalRandom.current().nextLong();
    this.scannersAndOffsets.put(scannerId, new RegionNameAndIndex(regionName));
    return scannerId;
  }

  public Result next(long scannerId) throws IOException {
    RegionNameAndIndex rnai = this.scannersAndOffsets.get(scannerId);
    int index = rnai.getThenIncrement();
    Result[] results = this.nexts.get(rnai.getRegionName());
    if (results == null) return null;
    return index < results.length ? results[index] : null;
  }

  public Result[] next(long scannerId, int numberOfRows) throws IOException {
    // Just return one result whatever they ask for.
    Result r = next(scannerId);
    return r == null ? null : new Result[] { r };
  }

  public void close(final long scannerId) throws IOException {
    this.scannersAndOffsets.remove(scannerId);
  }

  @Override
  public void stop(String why) {
    this.zkw.close();
  }

  @Override
  public void addRegion(HRegion r) {
  }

  @Override
  public boolean removeRegion(HRegion r, ServerName destination) {
    return false;
  }

  @Override
  public HRegion getRegion(String encodedRegionName) {
    return null;
  }

  @Override
  public Configuration getConfiguration() {
    return this.conf;
  }

  @Override
  public ZKWatcher getZooKeeper() {
    return this.zkw;
  }

  @Override
  public CoordinatedStateManager getCoordinatedStateManager() {
    return null;
  }

  @Override
  public ClusterConnection getConnection() {
    return null;
  }

  @Override
  public ServerName getServerName() {
    return this.sn;
  }

  @Override
  public boolean isStopping() {
    return false;
  }

  @Override
  public FlushRequester getFlushRequester() {
    return null;
  }

  @Override
  public CompactionRequester getCompactionRequestor() {
    return null;
  }

  @Override
  public RegionServerAccounting getRegionServerAccounting() {
    return null;
  }

  @Override
  public RegionServerRpcQuotaManager getRegionServerRpcQuotaManager() {
    return null;
  }

  @Override
  public void postOpenDeployTasks(PostOpenDeployContext context) throws IOException {
  }

  @Override
  public RpcServerInterface getRpcServer() {
    return null;
  }

  @Override
  public ConcurrentSkipListMap<byte[], Boolean> getRegionsInTransitionInRS() {
    return null;
  }

  @Override
  public FileSystem getFileSystem() {
    return null;
  }

  @Override
  public GetResponse get(RpcController controller, GetRequest request) throws ServiceException {
    byte[] regionName = request.getRegion().getValue().toByteArray();
    Map<byte[], Result> m = this.gets.get(regionName);
    GetResponse.Builder builder = GetResponse.newBuilder();
    if (m != null) {
      byte[] row = request.getGet().getRow().toByteArray();
      builder.setResult(ProtobufUtil.toResult(m.get(row)));
    }
    return builder.build();
  }

  @Override
  public MutateResponse mutate(RpcController controller, MutateRequest request)
    throws ServiceException {
    return null;
  }

  @Override
  public ScanResponse scan(RpcController controller, ScanRequest request) throws ServiceException {
    ScanResponse.Builder builder = ScanResponse.newBuilder();
    try {
      if (request.hasScan()) {
        byte[] regionName = request.getRegion().getValue().toByteArray();
        builder.setScannerId(openScanner(regionName, null));
        builder.setMoreResults(true);
      } else {
        long scannerId = request.getScannerId();
        Result result = next(scannerId);
        if (result != null) {
          builder.addCellsPerResult(result.size());
          List<CellScannable> results = new ArrayList<>(1);
          results.add(result);
          ((HBaseRpcController) controller).setCellScanner(CellUtil.createCellScanner(results));
          builder.setMoreResults(true);
        } else {
          builder.setMoreResults(false);
          close(scannerId);
        }
      }
    } catch (IOException ie) {
      throw new ServiceException(ie);
    }
    return builder.build();
  }

  @Override
  public BulkLoadHFileResponse bulkLoadHFile(RpcController controller, BulkLoadHFileRequest request)
    throws ServiceException {
    return null;
  }

  @Override
  public ClientProtos.CoprocessorServiceResponse execService(RpcController controller,
    ClientProtos.CoprocessorServiceRequest request) throws ServiceException {
    return null;
  }

  @Override
  public ClientProtos.MultiResponse multi(RpcController controller, MultiRequest request)
    throws ServiceException {
    return null;
  }

  @Override
  public GetRegionInfoResponse getRegionInfo(RpcController controller, GetRegionInfoRequest request)
    throws ServiceException {
    GetRegionInfoResponse.Builder builder = GetRegionInfoResponse.newBuilder();
    builder.setRegionInfo(ProtobufUtil.toRegionInfo(RegionInfoBuilder.FIRST_META_REGIONINFO));
    return builder.build();
  }

  @Override
  public GetRegionLoadResponse getRegionLoad(RpcController controller, GetRegionLoadRequest request)
    throws ServiceException {
    GetRegionLoadResponse.Builder builder = GetRegionLoadResponse.newBuilder();
    return builder.build();
  }

  @Override
  public ClearCompactionQueuesResponse clearCompactionQueues(RpcController controller,
    ClearCompactionQueuesRequest request) throws ServiceException {
    return null;
  }

  @Override
  public GetStoreFileResponse getStoreFile(RpcController controller, GetStoreFileRequest request)
    throws ServiceException {
    return null;
  }

  @Override
  public GetOnlineRegionResponse getOnlineRegion(RpcController controller,
    GetOnlineRegionRequest request) throws ServiceException {
    return null;
  }

  @Override
  public List<Region> getRegions() {
    return null;
  }

  @Override
  public OpenRegionResponse openRegion(RpcController controller, OpenRegionRequest request)
    throws ServiceException {
    return null;
  }

  @Override
  public WarmupRegionResponse warmupRegion(RpcController controller, WarmupRegionRequest request)
    throws ServiceException {
    return null;
  }

  @Override
  public CloseRegionResponse closeRegion(RpcController controller, CloseRegionRequest request)
    throws ServiceException {
    return null;
  }

  @Override
  public FlushRegionResponse flushRegion(RpcController controller, FlushRegionRequest request)
    throws ServiceException {
    return null;
  }

  @Override
  public CompactionSwitchResponse compactionSwitch(RpcController controller,
    CompactionSwitchRequest request) throws ServiceException {
    return null;
  }

  @Override
  public CompactRegionResponse compactRegion(RpcController controller, CompactRegionRequest request)
    throws ServiceException {
    return null;
  }

  @Override
  public ReplicateWALEntryResponse replicateWALEntry(RpcController controller,
    ReplicateWALEntryRequest request) throws ServiceException {
    return null;
  }

  @Override
  public RollWALWriterResponse rollWALWriter(RpcController controller, RollWALWriterRequest request)
    throws ServiceException {
    return null;
  }

  @Override
  public GetServerInfoResponse getServerInfo(RpcController controller, GetServerInfoRequest request)
    throws ServiceException {
    return null;
  }

  @Override
  public StopServerResponse stopServer(RpcController controller, StopServerRequest request)
    throws ServiceException {
    return null;
  }

  @Override
  public List<Region> getRegions(TableName tableName) throws IOException {
    return null;
  }

  @Override
  public LeaseManager getLeaseManager() {
    return null;
  }

  @Override
  public List<WAL> getWALs() throws IOException {
    return Collections.emptyList();
  }

  @Override
  public WAL getWAL(RegionInfo regionInfo) throws IOException {
    return null;
  }

  @Override
  public ExecutorService getExecutorService() {
    return null;
  }

  @Override
  public ChoreService getChoreService() {
    return null;
  }

  @Override
  public void updateRegionFavoredNodesMapping(String encodedRegionName,
    List<org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ServerName> favoredNodes) {
  }

  @Override
  public InetSocketAddress[] getFavoredNodesForRegion(String encodedRegionName) {
    return null;
  }

  @Override
  public ReplicateWALEntryResponse replay(RpcController controller,
    ReplicateWALEntryRequest request) throws ServiceException {
    return null;
  }

  @Override
  public UpdateFavoredNodesResponse updateFavoredNodes(RpcController controller,
    UpdateFavoredNodesRequest request) throws ServiceException {
    return null;
  }

  @Override
  public ServerNonceManager getNonceManager() {
    return null;
  }

  @Override
  public boolean reportRegionStateTransition(RegionStateTransitionContext context) {
    return false;
  }

  @Override
  public boolean registerService(com.google.protobuf.Service service) {
    return false;
  }

  @Override
  public CoprocessorServiceResponse execRegionServerService(RpcController controller,
    CoprocessorServiceRequest request) throws ServiceException {
    return null;
  }

  @Override
  public UpdateConfigurationResponse updateConfiguration(RpcController controller,
    UpdateConfigurationRequest request) throws ServiceException {
    return null;
  }

  @Override
  public ClearRegionBlockCacheResponse clearRegionBlockCache(RpcController controller,
    ClearRegionBlockCacheRequest request) throws ServiceException {
    return null;
  }

  @Override
  public HeapMemoryManager getHeapMemoryManager() {
    return null;
  }

  @Override
  public double getCompactionPressure() {
    return 0;
  }

  @Override
  public ClusterConnection getClusterConnection() {
    return null;
  }

  @Override
  public ThroughputController getFlushThroughputController() {
    return null;
  }

  @Override
  public double getFlushPressure() {
    return 0;
  }

  @Override
  public MetricsRegionServer getMetrics() {
    return null;
  }

  @Override
  public EntityLock regionLock(List<RegionInfo> regionInfos, String description, Abortable abort)
    throws IOException {
    return null;
  }

  @Override
  public PrepareBulkLoadResponse prepareBulkLoad(RpcController controller,
    PrepareBulkLoadRequest request) throws ServiceException {
    return null;
  }

  @Override
  public CleanupBulkLoadResponse cleanupBulkLoad(RpcController controller,
    CleanupBulkLoadRequest request) throws ServiceException {
    return null;
  }

  @Override
  public SecureBulkLoadManager getSecureBulkLoadManager() {
    return null;
  }

  @Override
  public void unassign(byte[] regionName) throws IOException {
  }

  @Override
  public RegionServerSpaceQuotaManager getRegionServerSpaceQuotaManager() {
    return null;
  }

  @Override
  public ExecuteProceduresResponse executeProcedures(RpcController controller,
    ExecuteProceduresRequest request) throws ServiceException {
    return null;
  }

  @Override
  public SlowLogResponses getSlowLogResponses(RpcController controller,
    SlowLogResponseRequest request) throws ServiceException {
    return null;
  }

  @Override
  public SlowLogResponses getLargeLogResponses(RpcController controller,
    SlowLogResponseRequest request) throws ServiceException {
    return null;
  }

  @Override
  public ClearSlowLogResponses clearSlowLogsResponses(RpcController controller,
    ClearSlowLogResponseRequest request) throws ServiceException {
    return null;
  }

  @Override
  public HBaseProtos.LogEntry getLogEntries(RpcController controller,
    HBaseProtos.LogRequest request) throws ServiceException {
    return null;
  }

  @Override
  public GetSpaceQuotaSnapshotsResponse getSpaceQuotaSnapshots(RpcController controller,
    GetSpaceQuotaSnapshotsRequest request) throws ServiceException {
    return null;
  }

  @Override
  public Connection createConnection(Configuration conf) throws IOException {
    return null;
  }

  @Override
  public boolean isClusterUp() {
    return true;
  }

  @Override
  public TableDescriptors getTableDescriptors() {
    return null;
  }

  @Override
  public Optional<BlockCache> getBlockCache() {
    return Optional.empty();
  }

  @Override
  public Optional<MobFileCache> getMobFileCache() {
    return Optional.empty();
  }

  @Override
  public AccessChecker getAccessChecker() {
    return null;
  }

  @Override
  public ZKPermissionWatcher getZKPermissionWatcher() {
    return null;
  }

  @Override
  public boolean reportRegionSizesForQuotas(RegionSizeStore sizeStore) {
    return true;
  }

  @Override
  public boolean reportFileArchivalForQuotas(TableName tableName,
    Collection<Entry<String, Long>> archivedFiles) {
    return false;
  }
}