001/* 002 * Licensed to the Apache Software Foundation (ASF) under one 003 * or more contributor license agreements. See the NOTICE file 004 * distributed with this work for additional information 005 * regarding copyright ownership. The ASF licenses this file 006 * to you under the Apache License, Version 2.0 (the 007 * "License"); you may not use this file except in compliance 008 * with the License. You may obtain a copy of the License at 009 * 010 * http://www.apache.org/licenses/LICENSE-2.0 011 * 012 * Unless required by applicable law or agreed to in writing, software 013 * distributed under the License is distributed on an "AS IS" BASIS, 014 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 015 * See the License for the specific language governing permissions and 016 * limitations under the License. 017 */ 018package org.apache.hadoop.hbase.master; 019 020import java.io.IOException; 021import java.net.InetSocketAddress; 022import java.util.ArrayList; 023import java.util.Collection; 024import java.util.Collections; 025import java.util.HashMap; 026import java.util.List; 027import java.util.Map; 028import java.util.Map.Entry; 029import java.util.Optional; 030import java.util.Random; 031import java.util.TreeMap; 032import java.util.concurrent.ConcurrentSkipListMap; 033import org.apache.hadoop.conf.Configuration; 034import org.apache.hadoop.fs.FileSystem; 035import org.apache.hadoop.hbase.Abortable; 036import org.apache.hadoop.hbase.CellScannable; 037import org.apache.hadoop.hbase.CellUtil; 038import org.apache.hadoop.hbase.ChoreService; 039import org.apache.hadoop.hbase.CoordinatedStateManager; 040import org.apache.hadoop.hbase.ServerName; 041import org.apache.hadoop.hbase.TableDescriptors; 042import org.apache.hadoop.hbase.TableName; 043import org.apache.hadoop.hbase.ZooKeeperConnectionException; 044import org.apache.hadoop.hbase.client.ClusterConnection; 045import org.apache.hadoop.hbase.client.Connection; 046import 
org.apache.hadoop.hbase.client.RegionInfo; 047import org.apache.hadoop.hbase.client.RegionInfoBuilder; 048import org.apache.hadoop.hbase.client.Result; 049import org.apache.hadoop.hbase.client.Scan; 050import org.apache.hadoop.hbase.client.locking.EntityLock; 051import org.apache.hadoop.hbase.executor.ExecutorService; 052import org.apache.hadoop.hbase.io.hfile.BlockCache; 053import org.apache.hadoop.hbase.ipc.HBaseRpcController; 054import org.apache.hadoop.hbase.ipc.RpcServerInterface; 055import org.apache.hadoop.hbase.mob.MobFileCache; 056import org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager; 057import org.apache.hadoop.hbase.quotas.RegionServerSpaceQuotaManager; 058import org.apache.hadoop.hbase.quotas.RegionSizeStore; 059import org.apache.hadoop.hbase.regionserver.FlushRequester; 060import org.apache.hadoop.hbase.regionserver.HRegion; 061import org.apache.hadoop.hbase.regionserver.HeapMemoryManager; 062import org.apache.hadoop.hbase.regionserver.Leases; 063import org.apache.hadoop.hbase.regionserver.MetricsRegionServer; 064import org.apache.hadoop.hbase.regionserver.Region; 065import org.apache.hadoop.hbase.regionserver.RegionServerAccounting; 066import org.apache.hadoop.hbase.regionserver.RegionServerServices; 067import org.apache.hadoop.hbase.regionserver.SecureBulkLoadManager; 068import org.apache.hadoop.hbase.regionserver.ServerNonceManager; 069import org.apache.hadoop.hbase.regionserver.compactions.CompactionRequester; 070import org.apache.hadoop.hbase.regionserver.throttle.ThroughputController; 071import org.apache.hadoop.hbase.util.Bytes; 072import org.apache.hadoop.hbase.wal.WAL; 073import org.apache.hadoop.hbase.zookeeper.ZKWatcher; 074 075import org.apache.hbase.thirdparty.com.google.protobuf.RpcController; 076import org.apache.hbase.thirdparty.com.google.protobuf.ServiceException; 077 078import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; 079import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos; 080import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ClearCompactionQueuesRequest; 081import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ClearCompactionQueuesResponse; 082import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ClearRegionBlockCacheRequest; 083import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ClearRegionBlockCacheResponse; 084import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionRequest; 085import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionResponse; 086import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CompactRegionRequest; 087import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CompactRegionResponse; 088import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CompactionSwitchRequest; 089import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CompactionSwitchResponse; 090import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ExecuteProceduresRequest; 091import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ExecuteProceduresResponse; 092import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.FlushRegionRequest; 093import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.FlushRegionResponse; 094import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetOnlineRegionRequest; 095import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetOnlineRegionResponse; 096import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionInfoRequest; 097import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionInfoResponse; 098import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionLoadRequest; 099import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionLoadResponse; 100import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetServerInfoRequest; 101import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetServerInfoResponse; 102import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetStoreFileRequest; 103import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetStoreFileResponse; 104import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.OpenRegionRequest; 105import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.OpenRegionResponse; 106import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ReplicateWALEntryRequest; 107import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ReplicateWALEntryResponse; 108import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.RollWALWriterRequest; 109import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.RollWALWriterResponse; 110import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.StopServerRequest; 111import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.StopServerResponse; 112import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.UpdateConfigurationRequest; 113import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.UpdateConfigurationResponse; 114import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.UpdateFavoredNodesRequest; 115import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.UpdateFavoredNodesResponse; 116import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.WarmupRegionRequest; 117import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.WarmupRegionResponse; 118import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos; 119import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.BulkLoadHFileRequest; 120import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.BulkLoadHFileResponse; 121import 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.CleanupBulkLoadRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.CleanupBulkLoadResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.CoprocessorServiceRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.CoprocessorServiceResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.GetRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.GetResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.MultiRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.MutateRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.MutateResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.PrepareBulkLoadRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.PrepareBulkLoadResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.ScanRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.ScanResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.GetSpaceQuotaSnapshotsRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.GetSpaceQuotaSnapshotsResponse;

/**
 * A mock RegionServer implementation.
 * Use this when you can't bend Mockito to your liking (e.g. return null result
 * when 'scanning' until master times out and then return a coherent meta row
 * result thereafter). Have some facility for faking gets and scans. See
 * setGetResult(byte[], byte[], Result) for how to fill the backing data
 * store that the get pulls from.
144 */ 145class MockRegionServer implements AdminProtos.AdminService.BlockingInterface, 146 ClientProtos.ClientService.BlockingInterface, RegionServerServices { 147 private final ServerName sn; 148 private final ZKWatcher zkw; 149 private final Configuration conf; 150 private final Random random = new Random(); 151 152 /** 153 * Map of regions to map of rows and {@link Result}. Used as data source when 154 * {@link #get(RpcController, ClientProtos.GetRequest)} is called. Because we have a byte 155 * key, need to use TreeMap and provide a Comparator. Use 156 * {@link #setGetResult(byte[], byte[], Result)} filling this map. 157 */ 158 private final Map<byte [], Map<byte [], Result>> gets = new TreeMap<>(Bytes.BYTES_COMPARATOR); 159 160 /** 161 * Map of regions to results to return when scanning. 162 */ 163 private final Map<byte [], Result []> nexts = new TreeMap<>(Bytes.BYTES_COMPARATOR); 164 165 /** 166 * Data structure that holds regionname and index used scanning. 167 */ 168 class RegionNameAndIndex { 169 private final byte[] regionName; 170 private int index = 0; 171 172 RegionNameAndIndex(final byte[] regionName) { 173 this.regionName = regionName; 174 } 175 176 byte[] getRegionName() { 177 return this.regionName; 178 } 179 180 int getThenIncrement() { 181 int currentIndex = this.index; 182 this.index++; 183 return currentIndex; 184 } 185 } 186 187 /** 188 * Outstanding scanners and their offset into <code>nexts</code> 189 */ 190 private final Map<Long, RegionNameAndIndex> scannersAndOffsets = new HashMap<>(); 191 192 /** 193 * @param sn Name of this mock regionserver 194 * @throws IOException 195 * @throws org.apache.hadoop.hbase.ZooKeeperConnectionException 196 */ 197 MockRegionServer(final Configuration conf, final ServerName sn) 198 throws ZooKeeperConnectionException, IOException { 199 this.sn = sn; 200 this.conf = conf; 201 this.zkw = new ZKWatcher(conf, sn.toString(), this, true); 202 } 203 204 /** 205 * Use this method filling the backing data source 
used by 206 * {@link #get(RpcController, ClientProtos.GetRequest)} 207 * @param regionName the region name to assign 208 * @param row the row key 209 * @param r the single row result 210 */ 211 void setGetResult(final byte [] regionName, final byte [] row, final Result r) { 212 Map<byte [], Result> value = this.gets.get(regionName); 213 if (value == null) { 214 // If no value already, create one. Needs to be treemap because we are 215 // using byte array as key. Not thread safe. 216 value = new TreeMap<>(Bytes.BYTES_COMPARATOR); 217 this.gets.put(regionName, value); 218 } 219 value.put(row, r); 220 } 221 222 /** 223 * Use this method to set what a scanner will reply as we next through 224 * @param regionName 225 * @param rs 226 */ 227 void setNextResults(final byte [] regionName, final Result [] rs) { 228 this.nexts.put(regionName, rs); 229 } 230 231 @Override 232 public boolean isStopped() { 233 return false; 234 } 235 236 @Override 237 public void abort(String why, Throwable e) { 238 throw new RuntimeException(this.sn + ": " + why, e); 239 } 240 241 @Override 242 public boolean isAborted() { 243 return false; 244 } 245 246 public long openScanner(byte[] regionName, Scan scan) throws IOException { 247 long scannerId = this.random.nextLong(); 248 this.scannersAndOffsets.put(scannerId, new RegionNameAndIndex(regionName)); 249 return scannerId; 250 } 251 252 public Result next(long scannerId) throws IOException { 253 RegionNameAndIndex rnai = this.scannersAndOffsets.get(scannerId); 254 int index = rnai.getThenIncrement(); 255 Result [] results = this.nexts.get(rnai.getRegionName()); 256 if (results == null) return null; 257 return index < results.length? results[index]: null; 258 } 259 260 public Result [] next(long scannerId, int numberOfRows) throws IOException { 261 // Just return one result whatever they ask for. 262 Result r = next(scannerId); 263 return r == null? 
null: new Result [] {r}; 264 } 265 266 public void close(final long scannerId) throws IOException { 267 this.scannersAndOffsets.remove(scannerId); 268 } 269 270 @Override 271 public void stop(String why) { 272 this.zkw.close(); 273 } 274 275 @Override 276 public void addRegion(HRegion r) { 277 } 278 279 @Override 280 public boolean removeRegion(HRegion r, ServerName destination) { 281 return false; 282 } 283 284 @Override 285 public HRegion getRegion(String encodedRegionName) { 286 return null; 287 } 288 289 @Override 290 public Configuration getConfiguration() { 291 return this.conf; 292 } 293 294 @Override 295 public ZKWatcher getZooKeeper() { 296 return this.zkw; 297 } 298 299 @Override 300 public CoordinatedStateManager getCoordinatedStateManager() { 301 return null; 302 } 303 304 @Override 305 public ClusterConnection getConnection() { 306 return null; 307 } 308 309 @Override 310 public ServerName getServerName() { 311 return this.sn; 312 } 313 314 @Override 315 public boolean isStopping() { 316 return false; 317 } 318 319 @Override 320 public FlushRequester getFlushRequester() { 321 return null; 322 } 323 @Override 324 public CompactionRequester getCompactionRequestor() { 325 return null; 326 } 327 @Override 328 public RegionServerAccounting getRegionServerAccounting() { 329 return null; 330 } 331 332 @Override 333 public RegionServerRpcQuotaManager getRegionServerRpcQuotaManager() { 334 return null; 335 } 336 337 @Override 338 public void postOpenDeployTasks(PostOpenDeployContext context) throws IOException { 339 } 340 341 @Override 342 public RpcServerInterface getRpcServer() { 343 return null; 344 } 345 346 @Override 347 public ConcurrentSkipListMap<byte[], Boolean> getRegionsInTransitionInRS() { 348 return null; 349 } 350 351 @Override 352 public FileSystem getFileSystem() { 353 return null; 354 } 355 356 @Override 357 public GetResponse get(RpcController controller, GetRequest request) 358 throws ServiceException { 359 byte[] regionName = 
request.getRegion().getValue().toByteArray(); 360 Map<byte [], Result> m = this.gets.get(regionName); 361 GetResponse.Builder builder = GetResponse.newBuilder(); 362 if (m != null) { 363 byte[] row = request.getGet().getRow().toByteArray(); 364 builder.setResult(ProtobufUtil.toResult(m.get(row))); 365 } 366 return builder.build(); 367 } 368 369 @Override 370 public MutateResponse mutate(RpcController controller, MutateRequest request) 371 throws ServiceException { 372 return null; 373 } 374 375 @Override 376 public ScanResponse scan(RpcController controller, ScanRequest request) 377 throws ServiceException { 378 ScanResponse.Builder builder = ScanResponse.newBuilder(); 379 try { 380 if (request.hasScan()) { 381 byte[] regionName = request.getRegion().getValue().toByteArray(); 382 builder.setScannerId(openScanner(regionName, null)); 383 builder.setMoreResults(true); 384 } 385 else { 386 long scannerId = request.getScannerId(); 387 Result result = next(scannerId); 388 if (result != null) { 389 builder.addCellsPerResult(result.size()); 390 List<CellScannable> results = new ArrayList<>(1); 391 results.add(result); 392 ((HBaseRpcController) controller).setCellScanner(CellUtil 393 .createCellScanner(results)); 394 builder.setMoreResults(true); 395 } 396 else { 397 builder.setMoreResults(false); 398 close(scannerId); 399 } 400 } 401 } catch (IOException ie) { 402 throw new ServiceException(ie); 403 } 404 return builder.build(); 405 } 406 407 @Override 408 public BulkLoadHFileResponse bulkLoadHFile(RpcController controller, 409 BulkLoadHFileRequest request) throws ServiceException { 410 return null; 411 } 412 413 @Override 414 public ClientProtos.CoprocessorServiceResponse execService(RpcController controller, 415 ClientProtos.CoprocessorServiceRequest request) throws ServiceException { 416 return null; 417 } 418 419 @Override 420 public ClientProtos.MultiResponse multi( 421 RpcController controller, MultiRequest request) throws ServiceException { 422 return null; 423 } 

  /**
   * Always reports {@code RegionInfoBuilder.FIRST_META_REGIONINFO}, i.e. the first
   * hbase:meta region, regardless of the request.
   */
  @Override
  public GetRegionInfoResponse getRegionInfo(RpcController controller,
      GetRegionInfoRequest request) throws ServiceException {
    GetRegionInfoResponse.Builder builder = GetRegionInfoResponse.newBuilder();
    builder.setRegionInfo(ProtobufUtil.toRegionInfo(RegionInfoBuilder.FIRST_META_REGIONINFO));
    return builder.build();
  }

  /** Returns an empty region-load response (no region loads reported). */
  @Override
  public GetRegionLoadResponse getRegionLoad(RpcController controller,
      GetRegionLoadRequest request) throws ServiceException {
    GetRegionLoadResponse.Builder builder = GetRegionLoadResponse.newBuilder();
    return builder.build();
  }

  // The remaining RPC handlers and RegionServerServices accessors below are inert
  // stubs returning null or a benign default — presumably never exercised by the
  // tests that use this mock (NOTE(review): confirm against callers before relying
  // on any of them).

  @Override
  public ClearCompactionQueuesResponse clearCompactionQueues(RpcController controller,
      ClearCompactionQueuesRequest request) throws ServiceException {
    return null;
  }

  @Override
  public GetStoreFileResponse getStoreFile(RpcController controller,
      GetStoreFileRequest request) throws ServiceException {
    return null;
  }

  @Override
  public GetOnlineRegionResponse getOnlineRegion(RpcController controller,
      GetOnlineRegionRequest request) throws ServiceException {
    return null;
  }

  @Override
  public List<Region> getRegions() {
    return null;
  }

  @Override
  public OpenRegionResponse openRegion(RpcController controller,
      OpenRegionRequest request) throws ServiceException {
    return null;
  }

  @Override
  public WarmupRegionResponse warmupRegion(RpcController controller,
      WarmupRegionRequest request) throws ServiceException {
    return null;
  }

  @Override
  public CloseRegionResponse closeRegion(RpcController controller,
      CloseRegionRequest request) throws ServiceException {
    return null;
  }

  @Override
  public FlushRegionResponse flushRegion(RpcController controller,
      FlushRegionRequest request) throws ServiceException {
    return null;
  }

  @Override
  public CompactionSwitchResponse compactionSwitch(RpcController controller,
      CompactionSwitchRequest request) throws ServiceException {
    return null;
  }

  @Override
  public CompactRegionResponse compactRegion(RpcController controller,
      CompactRegionRequest request) throws ServiceException {
    return null;
  }

  @Override
  public ReplicateWALEntryResponse replicateWALEntry(RpcController controller,
      ReplicateWALEntryRequest request) throws ServiceException {
    return null;
  }

  @Override
  public RollWALWriterResponse rollWALWriter(RpcController controller,
      RollWALWriterRequest request) throws ServiceException {
    return null;
  }

  @Override
  public GetServerInfoResponse getServerInfo(RpcController controller,
      GetServerInfoRequest request) throws ServiceException {
    return null;
  }

  @Override
  public StopServerResponse stopServer(RpcController controller,
      StopServerRequest request) throws ServiceException {
    return null;
  }

  @Override
  public List<Region> getRegions(TableName tableName) throws IOException {
    return null;
  }

  @Override
  public Leases getLeases() {
    return null;
  }

  /** No WALs in this mock; an empty list keeps callers that iterate happy. */
  @Override
  public List<WAL> getWALs() throws IOException {
    return Collections.emptyList();
  }

  @Override
  public WAL getWAL(RegionInfo regionInfo) throws IOException {
    return null;
  }

  @Override
  public ExecutorService getExecutorService() {
    return null;
  }

  @Override
  public ChoreService getChoreService() {
    return null;
  }

  @Override
  public void updateRegionFavoredNodesMapping(String encodedRegionName,
      List<org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ServerName> favoredNodes) {
  }

  @Override
  public InetSocketAddress[] getFavoredNodesForRegion(String encodedRegionName) {
    return null;
  }

  @Override
  public ReplicateWALEntryResponse
      replay(RpcController controller, ReplicateWALEntryRequest request)
          throws ServiceException {
    return null;
  }

  @Override
  public UpdateFavoredNodesResponse updateFavoredNodes(RpcController controller,
      UpdateFavoredNodesRequest request) throws ServiceException {
    return null;
  }

  @Override
  public ServerNonceManager getNonceManager() {
    return null;
  }

  @Override
  public boolean reportRegionStateTransition(RegionStateTransitionContext context) {
    return false;
  }

  @Override
  public boolean registerService(com.google.protobuf.Service service) {
    return false;
  }

  @Override
  public CoprocessorServiceResponse execRegionServerService(RpcController controller,
      CoprocessorServiceRequest request) throws ServiceException {
    return null;
  }

  @Override
  public UpdateConfigurationResponse updateConfiguration(
      RpcController controller, UpdateConfigurationRequest request)
      throws ServiceException {
    return null;
  }

  @Override
  public ClearRegionBlockCacheResponse clearRegionBlockCache(RpcController controller,
      ClearRegionBlockCacheRequest request)
      throws ServiceException {
    return null;
  }

  @Override
  public HeapMemoryManager getHeapMemoryManager() {
    return null;
  }

  // Zero pressure: this mock never compacts or flushes anything.
  @Override
  public double getCompactionPressure() {
    return 0;
  }

  @Override
  public ClusterConnection getClusterConnection() {
    return null;
  }

  @Override
  public ThroughputController getFlushThroughputController() {
    return null;
  }

  @Override
  public double getFlushPressure() {
    return 0;
  }

  @Override
  public MetricsRegionServer getMetrics() {
    return null;
  }

  @Override
  public EntityLock regionLock(List<RegionInfo> regionInfos, String description, Abortable abort)
      throws IOException {
    return null;
  }

  @Override
  public PrepareBulkLoadResponse prepareBulkLoad(RpcController controller,
      PrepareBulkLoadRequest request) throws ServiceException {
    return null;
  }

  @Override
  public CleanupBulkLoadResponse cleanupBulkLoad(RpcController controller,
      CleanupBulkLoadRequest request) throws ServiceException {
    return null;
  }

  @Override
  public SecureBulkLoadManager getSecureBulkLoadManager() {
    return null;
  }

  @Override
  public void unassign(byte[] regionName) throws IOException {
  }

  @Override
  public RegionServerSpaceQuotaManager getRegionServerSpaceQuotaManager() {
    return null;
  }

  @Override
  public ExecuteProceduresResponse executeProcedures(RpcController controller,
      ExecuteProceduresRequest request) throws ServiceException {
    return null;
  }

  @Override
  public GetSpaceQuotaSnapshotsResponse getSpaceQuotaSnapshots(
      RpcController controller, GetSpaceQuotaSnapshotsRequest request)
      throws ServiceException {
    return null;
  }

  @Override
  public Connection createConnection(Configuration conf) throws IOException {
    return null;
  }

  // The mock cluster always reports itself as up.
  @Override
  public boolean isClusterUp() {
    return true;
  }

  @Override
  public TableDescriptors getTableDescriptors() {
    return null;
  }

  @Override
  public Optional<BlockCache> getBlockCache() {
    return Optional.empty();
  }

  @Override
  public Optional<MobFileCache> getMobFileCache() {
    return Optional.empty();
  }

  // Pretend quota reports always succeed so callers don't retry.
  @Override
  public boolean reportRegionSizesForQuotas(RegionSizeStore sizeStore) {
    return true;
  }

  @Override
  public boolean reportFileArchivalForQuotas(
      TableName tableName, Collection<Entry<String, Long>> archivedFiles) {
    return false;
  }
}