/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.regionserver;

import static org.apache.hadoop.hbase.HConstants.DEFAULT_HBASE_SPLIT_COORDINATED_BY_ZK;
import static org.apache.hadoop.hbase.HConstants.DEFAULT_HBASE_SPLIT_WAL_MAX_SPLITTER;
import static org.apache.hadoop.hbase.HConstants.DEFAULT_SLOW_LOG_SYS_TABLE_CHORE_DURATION;
import static org.apache.hadoop.hbase.HConstants.HBASE_SPLIT_WAL_COORDINATED_BY_ZK;
import static org.apache.hadoop.hbase.HConstants.HBASE_SPLIT_WAL_MAX_SPLITTER;
import static org.apache.hadoop.hbase.master.waleventtracker.WALEventTrackerTableCreator.WAL_EVENT_TRACKER_ENABLED_DEFAULT;
import static org.apache.hadoop.hbase.master.waleventtracker.WALEventTrackerTableCreator.WAL_EVENT_TRACKER_ENABLED_KEY;
import static org.apache.hadoop.hbase.namequeues.NamedQueueServiceChore.NAMED_QUEUE_CHORE_DURATION_DEFAULT;
import static org.apache.hadoop.hbase.namequeues.NamedQueueServiceChore.NAMED_QUEUE_CHORE_DURATION_KEY;
import static org.apache.hadoop.hbase.replication.regionserver.ReplicationMarkerChore.REPLICATION_MARKER_CHORE_DURATION_DEFAULT;
import static org.apache.hadoop.hbase.replication.regionserver.ReplicationMarkerChore.REPLICATION_MARKER_CHORE_DURATION_KEY;
import static org.apache.hadoop.hbase.replication.regionserver.ReplicationMarkerChore.REPLICATION_MARKER_ENABLED_DEFAULT;
import static org.apache.hadoop.hbase.replication.regionserver.ReplicationMarkerChore.REPLICATION_MARKER_ENABLED_KEY;
import static org.apache.hadoop.hbase.util.DNS.UNSAFE_RS_HOSTNAME_KEY;

import io.opentelemetry.api.trace.Span;
import io.opentelemetry.api.trace.StatusCode;
import io.opentelemetry.context.Scope;
import java.io.IOException;
import java.io.PrintWriter;
import java.lang.management.MemoryUsage;
import java.lang.reflect.Constructor;
import java.net.InetSocketAddress;
import java.time.Duration;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Collections;
import java.util.Comparator;
import java.util.HashSet;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.Map.Entry;
import java.util.Objects;
import java.util.Optional;
import java.util.Set;
import java.util.SortedMap;
import java.util.Timer;
import java.util.TimerTask;
import java.util.TreeMap;
import java.util.TreeSet;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import java.util.concurrent.ConcurrentSkipListMap;
import java.util.concurrent.ThreadLocalRandom;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.locks.ReentrantReadWriteLock;
import java.util.stream.Collectors;
import javax.management.MalformedObjectNameException;
import javax.servlet.http.HttpServlet;
import org.apache.commons.lang3.StringUtils;
import org.apache.commons.lang3.mutable.MutableFloat;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.Abortable;
import org.apache.hadoop.hbase.CacheEvictionStats;
import org.apache.hadoop.hbase.CallQueueTooBigException;
import org.apache.hadoop.hbase.ClockOutOfSyncException;
import org.apache.hadoop.hbase.DoNotRetryIOException;
import org.apache.hadoop.hbase.ExecutorStatusChore;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HBaseInterfaceAudience;
import org.apache.hadoop.hbase.HBaseServerBase;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HDFSBlocksDistribution;
import org.apache.hadoop.hbase.HRegionLocation;
import org.apache.hadoop.hbase.HealthCheckChore;
import org.apache.hadoop.hbase.MetaTableAccessor;
import org.apache.hadoop.hbase.NotServingRegionException;
import org.apache.hadoop.hbase.PleaseHoldException;
import org.apache.hadoop.hbase.ScheduledChore;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.Stoppable;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.YouAreDeadException;
import org.apache.hadoop.hbase.ZNodeClearer;
import org.apache.hadoop.hbase.client.ConnectionUtils;
import org.apache.hadoop.hbase.client.RegionInfo;
import org.apache.hadoop.hbase.client.RegionInfoBuilder;
import org.apache.hadoop.hbase.client.locking.EntityLock;
import org.apache.hadoop.hbase.client.locking.LockServiceClient;
import org.apache.hadoop.hbase.conf.ConfigurationObserver;
import org.apache.hadoop.hbase.coprocessor.CoprocessorHost;
import org.apache.hadoop.hbase.exceptions.RegionMovedException;
import org.apache.hadoop.hbase.exceptions.RegionOpeningException;
import org.apache.hadoop.hbase.exceptions.UnknownProtocolException;
import org.apache.hadoop.hbase.executor.ExecutorType;
import org.apache.hadoop.hbase.http.InfoServer;
import org.apache.hadoop.hbase.io.hfile.BlockCache;
import org.apache.hadoop.hbase.io.hfile.BlockCacheFactory;
import org.apache.hadoop.hbase.io.hfile.HFile;
import org.apache.hadoop.hbase.io.util.MemorySizeUtil;
import org.apache.hadoop.hbase.ipc.CoprocessorRpcUtils;
import org.apache.hadoop.hbase.ipc.DecommissionedHostRejectedException;
import org.apache.hadoop.hbase.ipc.RpcClient;
import org.apache.hadoop.hbase.ipc.RpcServer;
import org.apache.hadoop.hbase.ipc.ServerNotRunningYetException;
import org.apache.hadoop.hbase.ipc.ServerRpcController;
import org.apache.hadoop.hbase.log.HBaseMarkers;
import org.apache.hadoop.hbase.mob.MobFileCache;
import org.apache.hadoop.hbase.mob.RSMobFileCleanerChore;
import org.apache.hadoop.hbase.monitoring.TaskMonitor;
import org.apache.hadoop.hbase.namequeues.NamedQueueRecorder;
import org.apache.hadoop.hbase.namequeues.NamedQueueServiceChore;
import org.apache.hadoop.hbase.net.Address;
import org.apache.hadoop.hbase.procedure.RegionServerProcedureManagerHost;
import org.apache.hadoop.hbase.procedure2.RSProcedureCallable;
import org.apache.hadoop.hbase.quotas.FileSystemUtilizationChore;
import org.apache.hadoop.hbase.quotas.QuotaUtil;
import org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager;
import org.apache.hadoop.hbase.quotas.RegionServerSpaceQuotaManager;
import org.apache.hadoop.hbase.quotas.RegionSize;
import org.apache.hadoop.hbase.quotas.RegionSizeStore;
import org.apache.hadoop.hbase.regionserver.compactions.CompactionConfiguration;
import org.apache.hadoop.hbase.regionserver.compactions.CompactionLifeCycleTracker;
import org.apache.hadoop.hbase.regionserver.compactions.CompactionProgress;
import org.apache.hadoop.hbase.regionserver.compactions.CompactionRequester;
import org.apache.hadoop.hbase.regionserver.handler.CloseMetaHandler;
import org.apache.hadoop.hbase.regionserver.handler.CloseRegionHandler;
import org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler;
import org.apache.hadoop.hbase.regionserver.handler.RegionReplicaFlushHandler;
import org.apache.hadoop.hbase.regionserver.http.RSDumpServlet;
import org.apache.hadoop.hbase.regionserver.http.RSStatusServlet;
import org.apache.hadoop.hbase.regionserver.regionreplication.RegionReplicationBufferManager;
import org.apache.hadoop.hbase.regionserver.throttle.FlushThroughputControllerFactory;
import org.apache.hadoop.hbase.regionserver.throttle.ThroughputController;
import org.apache.hadoop.hbase.regionserver.wal.WALActionsListener;
import org.apache.hadoop.hbase.regionserver.wal.WALEventTrackerListener;
import org.apache.hadoop.hbase.replication.regionserver.ReplicationLoad;
import org.apache.hadoop.hbase.replication.regionserver.ReplicationMarkerChore;
import org.apache.hadoop.hbase.replication.regionserver.ReplicationSourceInterface;
import org.apache.hadoop.hbase.replication.regionserver.ReplicationStatus;
import org.apache.hadoop.hbase.security.SecurityConstants;
import org.apache.hadoop.hbase.security.Superusers;
import org.apache.hadoop.hbase.security.User;
import org.apache.hadoop.hbase.security.UserProvider;
import org.apache.hadoop.hbase.trace.TraceUtil;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.CompressionTest;
import org.apache.hadoop.hbase.util.CoprocessorConfigurationUtil;
import org.apache.hadoop.hbase.util.DNS;
import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
import org.apache.hadoop.hbase.util.FSUtils;
import org.apache.hadoop.hbase.util.FutureUtils;
import org.apache.hadoop.hbase.util.JvmPauseMonitor;
import org.apache.hadoop.hbase.util.Pair;
import org.apache.hadoop.hbase.util.RetryCounter;
import org.apache.hadoop.hbase.util.RetryCounterFactory;
import org.apache.hadoop.hbase.util.ServerRegionReplicaUtil;
import org.apache.hadoop.hbase.util.Threads;
import org.apache.hadoop.hbase.util.VersionInfo;
import org.apache.hadoop.hbase.wal.AbstractFSWALProvider;
import org.apache.hadoop.hbase.wal.WAL;
import org.apache.hadoop.hbase.wal.WALFactory;
import org.apache.hadoop.hbase.zookeeper.MasterAddressTracker;
import org.apache.hadoop.hbase.zookeeper.ZKClusterId;
import org.apache.hadoop.hbase.zookeeper.ZKNodeTracker;
import org.apache.hadoop.hbase.zookeeper.ZKUtil;
import org.apache.hadoop.ipc.RemoteException;
import org.apache.hadoop.util.ReflectionUtils;
import org.apache.yetus.audience.InterfaceAudience;
import org.apache.zookeeper.KeeperException;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import org.apache.hbase.thirdparty.com.google.common.base.Preconditions;
import org.apache.hbase.thirdparty.com.google.common.base.Throwables;
import org.apache.hbase.thirdparty.com.google.common.cache.Cache;
import org.apache.hbase.thirdparty.com.google.common.cache.CacheBuilder;
import org.apache.hbase.thirdparty.com.google.common.collect.Maps;
import org.apache.hbase.thirdparty.com.google.common.net.InetAddresses;
import org.apache.hbase.thirdparty.com.google.protobuf.BlockingRpcChannel;
import org.apache.hbase.thirdparty.com.google.protobuf.Descriptors.MethodDescriptor;
import org.apache.hbase.thirdparty.com.google.protobuf.Descriptors.ServiceDescriptor;
import org.apache.hbase.thirdparty.com.google.protobuf.Message;
import org.apache.hbase.thirdparty.com.google.protobuf.RpcController;
import org.apache.hbase.thirdparty.com.google.protobuf.Service;
import org.apache.hbase.thirdparty.com.google.protobuf.ServiceException;
import org.apache.hbase.thirdparty.com.google.protobuf.TextFormat;
import org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations;

import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
import org.apache.hadoop.hbase.shaded.protobuf.RequestConverter;
import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.CoprocessorServiceCall;
import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.CoprocessorServiceRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.CoprocessorServiceResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos;
import org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos.RegionLoad;
import org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos.RegionStoreSequenceIds;
import org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos.UserLoad;
import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.Coprocessor;
import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.NameStringPair;
import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionServerInfo;
import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier;
import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier.RegionSpecifierType;
import org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.LockService;
import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos;
import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.GetLastFlushedSequenceIdRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.GetLastFlushedSequenceIdResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionServerReportRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionServerStartupRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionServerStartupResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionServerStatusService;
import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUseReportRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionStateTransition;
import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionStateTransition.TransitionCode;
import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.ReportProcedureDoneRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.ReportRSFatalErrorRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.ReportRegionStateTransitionRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.ReportRegionStateTransitionResponse;

/**
 * HRegionServer makes a set of HRegions available to clients. It checks in with the HMaster. There
 * are many HRegionServers in a single HBase deployment.
 */
@InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.TOOLS)
@SuppressWarnings({ "deprecation" })
public class HRegionServer extends HBaseServerBase<RSRpcServices>
  implements RegionServerServices, LastSequenceId {

  private static final Logger LOG = LoggerFactory.getLogger(HRegionServer.class);

  // Byte size units used when rounding sizes for reports; see roundSize below.
  int unitMB = 1024 * 1024;
  int unitKB = 1024;

  /**
   * For testing only! Set to true to skip notifying region assignment to master.
   */
  @InterfaceAudience.Private
  @edu.umd.cs.findbugs.annotations.SuppressWarnings(value = "MS_SHOULD_BE_FINAL")
  public static boolean TEST_SKIP_REPORTING_TRANSITION = false;

  /**
   * A map from RegionName to the action currently in progress. The boolean value indicates: true
   * if an open-region action is in progress, false if a close-region action is in progress.
   */
  private final ConcurrentMap<byte[], Boolean> regionsInTransitionInRS =
    new ConcurrentSkipListMap<>(Bytes.BYTES_COMPARATOR);

  /**
   * Used to cache the open/close region procedures which have already been submitted. See
   * {@link #submitRegionProcedure(long)}.
   */
  private final ConcurrentMap<Long, Long> submittedRegionProcedures = new ConcurrentHashMap<>();
  /**
   * Used to cache the open/close region procedures which have already been executed. See
   * {@link #submitRegionProcedure(long)}.
   */
  private final Cache<Long, Long> executedRegionProcedures =
    CacheBuilder.newBuilder().expireAfterAccess(600, TimeUnit.SECONDS).build();
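  // A retried open/close RPC for a procedure that already ran can thus be recognized for roughly
  // ten minutes (600 seconds after last access) and answered as a duplicate rather than
  // re-executed.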

  /**
   * Used to cache the moved-out regions
   */
  private final Cache<String, MovedRegionInfo> movedRegionInfoCache = CacheBuilder.newBuilder()
    .expireAfterWrite(movedRegionCacheExpiredTime(), TimeUnit.MILLISECONDS).build();

  private MemStoreFlusher cacheFlusher;

  private HeapMemoryManager hMemManager;

  // Replication services. If no replication, this handler will be null.
  private ReplicationSourceService replicationSourceHandler;
  private ReplicationSinkService replicationSinkHandler;
  private boolean sameReplicationSourceAndSink;

  // Compactions
  private CompactSplit compactSplitThread;

  /**
   * Map of regions currently being served by this region server. Key is the encoded region name.
   * All access should be synchronized.
   */
  private final Map<String, HRegion> onlineRegions = new ConcurrentHashMap<>();
  /**
   * Lock for gating access to {@link #onlineRegions}. TODO: If this map is gated by a lock, does it
   * need to be a ConcurrentHashMap?
   */
  private final ReentrantReadWriteLock onlineRegionsLock = new ReentrantReadWriteLock();

  /**
   * Map of encoded region names to the DataNode locations they should be hosted on. We store the
   * value as Address since InetSocketAddress is required by the HDFS API (create() that takes
   * favored nodes as hints for placing file blocks). We could have used ServerName here as the
   * value class, but we'd need to convert it to InetSocketAddress at some point before the HDFS API
   * call, and it seems a bit weird to store ServerName since ServerName refers to RegionServers and
   * here we really mean DataNode locations. We don't store it as InetSocketAddress here because the
   * conversion on demand from Address to InetSocketAddress will guarantee the resolution results
   * will be fresh when we need it.
   */
  private final Map<String, Address[]> regionFavoredNodesMap = new ConcurrentHashMap<>();

  private LeaseManager leaseManager;

  private volatile boolean dataFsOk;

  static final String ABORT_TIMEOUT = "hbase.regionserver.abort.timeout";
  // Default abort timeout is 1200 seconds (20 minutes), to be safe.
  private static final long DEFAULT_ABORT_TIMEOUT = 1200000;
  // Will run this task when the abort timeout is reached.
  static final String ABORT_TIMEOUT_TASK = "hbase.regionserver.abort.timeout.task";

  // A state before we go into stopped state. At this stage we're closing user
  // space regions.
  private boolean stopping = false;
  private volatile boolean killed = false;

  private final int threadWakeFrequency;

  private static final String PERIOD_COMPACTION = "hbase.regionserver.compaction.check.period";
  private final int compactionCheckFrequency;
  private static final String PERIOD_FLUSH = "hbase.regionserver.flush.check.period";
  private final int flushCheckFrequency;
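  // Both check periods fall back to hbase.server.thread.wakefrequency (see the constructor); an
  // illustrative override in hbase-site.xml (example value, not a default):
  //   <property>
  //     <name>hbase.regionserver.compaction.check.period</name>
  //     <value>20000</value>
  //   </property>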

  // Stub to do region server status calls against the master.
  private volatile RegionServerStatusService.BlockingInterface rssStub;
  private volatile LockService.BlockingInterface lockStub;
  // RPC client. Used to make the stub above that does region server status checking.
  private RpcClient rpcClient;

  private UncaughtExceptionHandler uncaughtExceptionHandler;

  private JvmPauseMonitor pauseMonitor;

  private RSSnapshotVerifier rsSnapshotVerifier;

  /** region server process name */
  public static final String REGIONSERVER = "regionserver";

  private MetricsRegionServer metricsRegionServer;
  MetricsRegionServerWrapperImpl metricsRegionServerImpl;

  /**
   * Check for compaction requests.
   */
  private ScheduledChore compactionChecker;

  /**
   * Check for flushes
   */
  private ScheduledChore periodicFlusher;

  private volatile WALFactory walFactory;

  private LogRoller walRoller;

  // A thread which calls reportProcedureDone
  private RemoteProcedureResultReporter procedureResultReporter;

  // flag set after we're done setting up server threads
  final AtomicBoolean online = new AtomicBoolean(false);

  // master address tracker
  private final MasterAddressTracker masterAddressTracker;

  // Log Splitting Worker
  private SplitLogWorker splitLogWorker;

  private final int shortOperationTimeout;

  // Time to pause if master says 'please hold'
  private final long retryPauseTime;

  private final RegionServerAccounting regionServerAccounting;

  private NamedQueueServiceChore namedQueueServiceChore = null;

  // Block cache
  private BlockCache blockCache;
  // The cache for mob files
  private MobFileCache mobFileCache;

  /** The health check chore. */
  private HealthCheckChore healthCheckChore;

  /** The Executor status collect chore. */
  private ExecutorStatusChore executorStatusChore;

  /** The nonce manager chore. */
  private ScheduledChore nonceManagerChore;

  private Map<String, Service> coprocessorServiceHandlers = Maps.newHashMap();

  /**
   * @deprecated since 2.4.0 and will be removed in 4.0.0. Use
   *             {@link HRegionServer#UNSAFE_RS_HOSTNAME_DISABLE_MASTER_REVERSEDNS_KEY} instead.
   * @see <a href="https://issues.apache.org/jira/browse/HBASE-24667">HBASE-24667</a>
   */
  @Deprecated
  @InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.CONFIG)
  final static String RS_HOSTNAME_DISABLE_MASTER_REVERSEDNS_KEY =
    "hbase.regionserver.hostname.disable.master.reversedns";

  /**
   * HBASE-18226: This config and hbase.unsafe.regionserver.hostname are mutually exclusive.
   * Exception will be thrown if both are used.
   */
  @InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.CONFIG)
  final static String UNSAFE_RS_HOSTNAME_DISABLE_MASTER_REVERSEDNS_KEY =
    "hbase.unsafe.regionserver.hostname.disable.master.reversedns";
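  // Interplay, enforced in getUseThisHostnameInstead below: when this key is true the RS resolves
  // and reports its own hostname instead of letting the Master reverse-DNS it, and combining it
  // with hbase.unsafe.regionserver.hostname is rejected with an IOException.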

  /**
   * Unique identifier for the cluster we are a part of.
   */
  private String clusterId;

  // chore for refreshing store files for secondary regions
  private StorefileRefresherChore storefileRefresher;

  private volatile RegionServerCoprocessorHost rsHost;

  private RegionServerProcedureManagerHost rspmHost;

  private RegionServerRpcQuotaManager rsQuotaManager;
  private RegionServerSpaceQuotaManager rsSpaceQuotaManager;

  /**
   * Nonce manager. Nonces are used to make operations like increment and append idempotent in the
   * case where the client doesn't receive the response from a successful operation and retries. We
   * track the successful ops for some time via a nonce sent by the client and handle duplicate
   * operations (currently, by failing them; in the future we might use MVCC to return the result).
   * Nonces are also recovered from the WAL during recovery; however, the caveats (from HBASE-3787)
   * are:
   * - WAL recovery is optimized, and under high load we won't read nearly nonce-timeout worth of
   * past records. If we don't read the records, we don't read and recover the nonces. Some WALs
   * within nonce-timeout at recovery may not even be present due to rolling/cleanup.
   * - There's no WAL recovery during normal region move, so nonces will not be transferred.
   * We could have a separate additional "Nonce WAL". It would just contain a bunch of numbers and
   * won't be flushed on the main path - because the WAL itself also contains nonces, if we only
   * flush it before the memstore flush, for a given nonce we will either see it in the WAL (if it
   * was never flushed to disk, it will be part of recovery), or we'll see it as part of the nonce
   * log (or both occasionally, which doesn't matter). The nonce log file can be deleted after the
   * latest nonce in it expires. It can also be recovered during move.
   */
  final ServerNonceManager nonceManager;

  private BrokenStoreFileCleaner brokenStoreFileCleaner;

  private RSMobFileCleanerChore rsMobFileCleanerChore;

  @InterfaceAudience.Private
  CompactedHFilesDischarger compactedFileDischarger;

  private volatile ThroughputController flushThroughputController;

  private SecureBulkLoadManager secureBulkLoadManager;

  private FileSystemUtilizationChore fsUtilizationChore;

  private BootstrapNodeManager bootstrapNodeManager;

  /**
   * True if this RegionServer is coming up in a cluster where there is no Master; means it needs to
   * just come up and make do without a Master to talk to: e.g. in test, or where HRegionServer is
   * doing other than its usual duties: e.g. as a hollowed-out host whose only purpose is as a
   * Replication-stream sink; see HBASE-18846 for more. TODO: can this replace
   * {@link #TEST_SKIP_REPORTING_TRANSITION} ?
   */
  private final boolean masterless;
  private static final String MASTERLESS_CONFIG_NAME = "hbase.masterless";

  /** regionserver codec list **/
  private static final String REGIONSERVER_CODEC = "hbase.regionserver.codecs";

  // A timer to shutdown the process if abort takes too long
  private Timer abortMonitor;

  private RegionReplicationBufferManager regionReplicationBufferManager;

  /*
   * Chore that creates replication marker rows.
   */
  private ReplicationMarkerChore replicationMarkerChore;

  // A timer that submits requests to the PrefetchExecutor
  private PrefetchExecutorNotifier prefetchExecutorNotifier;

  /**
   * Starts a HRegionServer at the default location.
   * <p/>
   * Don't start any services or managers in here in the constructor. Defer till after we register
   * with the Master as much as possible. See {@link #startServices}.
   */
  public HRegionServer(final Configuration conf) throws IOException {
    super(conf, "RegionServer"); // thread name
    final Span span = TraceUtil.createSpan("HRegionServer.cxtor");
    try (Scope ignored = span.makeCurrent()) {
      this.dataFsOk = true;
      this.masterless = !clusterMode();
      MemorySizeUtil.validateRegionServerHeapMemoryAllocation(conf);
      HFile.checkHFileVersion(this.conf);
      checkCodecs(this.conf);
      FSUtils.setupShortCircuitRead(this.conf);

      // Disable usage of meta replicas in the regionserver
      this.conf.setBoolean(HConstants.USE_META_REPLICAS, false);
      // Config'ed params
      this.threadWakeFrequency = conf.getInt(HConstants.THREAD_WAKE_FREQUENCY, 10 * 1000);
      this.compactionCheckFrequency = conf.getInt(PERIOD_COMPACTION, this.threadWakeFrequency);
      this.flushCheckFrequency = conf.getInt(PERIOD_FLUSH, this.threadWakeFrequency);

      boolean isNoncesEnabled = conf.getBoolean(HConstants.HBASE_RS_NONCES_ENABLED, true);
      this.nonceManager = isNoncesEnabled ? new ServerNonceManager(this.conf) : null;
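      // Nonces are on by default; disabling them (hbase.regionserver.nonces.enabled = false)
      // trades away the retry idempotence described in the nonceManager javadoc above.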

      this.shortOperationTimeout = conf.getInt(HConstants.HBASE_RPC_SHORTOPERATION_TIMEOUT_KEY,
        HConstants.DEFAULT_HBASE_RPC_SHORTOPERATION_TIMEOUT);

      this.retryPauseTime = conf.getLong(HConstants.HBASE_RPC_SHORTOPERATION_RETRY_PAUSE_TIME,
        HConstants.DEFAULT_HBASE_RPC_SHORTOPERATION_RETRY_PAUSE_TIME);

      regionServerAccounting = new RegionServerAccounting(conf);

      blockCache = BlockCacheFactory.createBlockCache(conf);
      // The call below instantiates the DataTieringManager only when the configuration
      // "hbase.regionserver.datatiering.enable" is set to true.
      DataTieringManager.instantiate(conf, onlineRegions);

      mobFileCache = new MobFileCache(conf);

      rsSnapshotVerifier = new RSSnapshotVerifier(conf);

      uncaughtExceptionHandler =
        (t, e) -> abort("Uncaught exception in executorService thread " + t.getName(), e);

      // If no master in cluster, skip trying to track one or look for a cluster status.
      if (!this.masterless) {
        masterAddressTracker = new MasterAddressTracker(getZooKeeper(), this);
        masterAddressTracker.start();
      } else {
        masterAddressTracker = null;
      }
      this.rpcServices.start(zooKeeper);
      span.setStatus(StatusCode.OK);
    } catch (Throwable t) {
      // Make sure we log the exception. HRegionServer is often started via reflection and the
      // cause of a failed startup is lost.
      TraceUtil.setError(span, t);
      LOG.error("Failed construction RegionServer", t);
      throw t;
    } finally {
      span.end();
    }
  }

  // HMaster should override this method to load the specific config for master
  @Override
  protected String getUseThisHostnameInstead(Configuration conf) throws IOException {
    String hostname = conf.get(UNSAFE_RS_HOSTNAME_KEY);
    if (conf.getBoolean(UNSAFE_RS_HOSTNAME_DISABLE_MASTER_REVERSEDNS_KEY, false)) {
      if (!StringUtils.isBlank(hostname)) {
        String msg = UNSAFE_RS_HOSTNAME_DISABLE_MASTER_REVERSEDNS_KEY + " and "
          + UNSAFE_RS_HOSTNAME_KEY + " are mutually exclusive. Do not set "
          + UNSAFE_RS_HOSTNAME_DISABLE_MASTER_REVERSEDNS_KEY + " to true while "
          + UNSAFE_RS_HOSTNAME_KEY + " is used";
        throw new IOException(msg);
      } else {
        return DNS.getHostname(conf, DNS.ServerType.REGIONSERVER);
      }
    } else {
      return hostname;
    }
  }

  @Override
  protected DNS.ServerType getDNSServerType() {
    return DNS.ServerType.REGIONSERVER;
  }

  @Override
  protected void login(UserProvider user, String host) throws IOException {
    user.login(SecurityConstants.REGIONSERVER_KRB_KEYTAB_FILE,
      SecurityConstants.REGIONSERVER_KRB_PRINCIPAL, host);
  }

  @Override
  protected String getProcessName() {
    return REGIONSERVER;
  }

  @Override
  protected RegionServerCoprocessorHost getCoprocessorHost() {
    return getRegionServerCoprocessorHost();
  }

  @Override
  protected boolean canCreateBaseZNode() {
    return !clusterMode();
  }

  @Override
  protected boolean canUpdateTableDescriptor() {
    return false;
  }

  @Override
  protected boolean cacheTableDescriptor() {
    return false;
  }

  protected RSRpcServices createRpcServices() throws IOException {
    return new RSRpcServices(this);
  }

  @Override
  protected void configureInfoServer(InfoServer infoServer) {
    infoServer.addUnprivilegedServlet("rs-status", "/rs-status", RSStatusServlet.class);
    infoServer.setAttribute(REGIONSERVER, this);
  }

  @Override
  protected Class<? extends HttpServlet> getDumpServlet() {
    return RSDumpServlet.class;
  }

  /**
   * Used by {@link RSDumpServlet} to generate debugging information.
   */
  public void dumpRowLocks(final PrintWriter out) {
    StringBuilder sb = new StringBuilder();
    for (HRegion region : getRegions()) {
      if (region.getLockedRows().size() > 0) {
        for (HRegion.RowLockContext rowLockContext : region.getLockedRows().values()) {
          sb.setLength(0);
          sb.append(region.getTableDescriptor().getTableName()).append(",")
            .append(region.getRegionInfo().getEncodedName()).append(",");
          sb.append(rowLockContext.toString());
          out.println(sb);
        }
      }
    }
  }

  @Override
  public boolean registerService(Service instance) {
    // No stacking of instances is allowed for a single executorService name
    ServiceDescriptor serviceDesc = instance.getDescriptorForType();
    String serviceName = CoprocessorRpcUtils.getServiceName(serviceDesc);
    if (coprocessorServiceHandlers.containsKey(serviceName)) {
      LOG.error("Coprocessor executorService " + serviceName
        + " already registered, rejecting request from " + instance);
      return false;
    }

    coprocessorServiceHandlers.put(serviceName, instance);
    if (LOG.isDebugEnabled()) {
      LOG.debug(
        "Registered regionserver coprocessor executorService: executorService=" + serviceName);
    }
    return true;
  }

  /**
   * Run a test on the configured codecs to make sure the supporting libs are in place.
   */
  private static void checkCodecs(final Configuration c) throws IOException {
    // check to see if the codec list is available:
    String[] codecs = c.getStrings(REGIONSERVER_CODEC, (String[]) null);
    if (codecs == null) {
      return;
    }
    for (String codec : codecs) {
      if (!CompressionTest.testCompression(codec)) {
        throw new IOException(
          "Compression codec " + codec + " not supported, aborting RS construction");
      }
    }
  }
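  // Illustrative hbase-site.xml entry naming codecs that must load before the RS will construct
  // (the value is an example, not a default):
  //   <property>
  //     <name>hbase.regionserver.codecs</name>
  //     <value>snappy,lz4</value>
  //   </property>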

  public String getClusterId() {
    return this.clusterId;
  }

  /**
   * All initialization needed before we go register with the Master.<br>
   * Do the bare minimum. Do the bulk of initialization AFTER we've connected to the Master.<br>
   * In here we just put up the RpcServer, setup Connection, and ZooKeeper.
   */
  private void preRegistrationInitialization() {
    final Span span = TraceUtil.createSpan("HRegionServer.preRegistrationInitialization");
    try (Scope ignored = span.makeCurrent()) {
      initializeZooKeeper();
      setupClusterConnection();
      bootstrapNodeManager = new BootstrapNodeManager(asyncClusterConnection, masterAddressTracker);
      regionReplicationBufferManager = new RegionReplicationBufferManager(this);
      // Setup RPC client for master communication
      this.rpcClient = asyncClusterConnection.getRpcClient();
      span.setStatus(StatusCode.OK);
    } catch (Throwable t) {
      // Call stop if error or the process will stick around forever since the server
      // puts up non-daemon threads.
      TraceUtil.setError(span, t);
      this.rpcServices.stop();
      abort("Initialization of RS failed. Hence aborting RS.", t);
    } finally {
      span.end();
    }
  }

  /**
   * Bring up the connection to the zk ensemble, then wait until a master is available for this
   * cluster, and after that wait until the cluster 'up' flag has been set. This is the order in
   * which the master does things.
   * <p>
   * Finally open the long-living server short-circuit connection.
   */
  @edu.umd.cs.findbugs.annotations.SuppressWarnings(value = "RV_RETURN_VALUE_IGNORED_BAD_PRACTICE",
      justification = "cluster Id znode read would give us correct response")
  private void initializeZooKeeper() throws IOException, InterruptedException {
    // Nothing to do in here if no Master in the mix.
    if (this.masterless) {
      return;
    }

    // Create the master address tracker, register with zk, and start it. Then
    // block until a master is available. No point in starting up if no master
    // running.
    blockAndCheckIfStopped(this.masterAddressTracker);

    // Wait on cluster being up. Master will set this flag up in zookeeper
    // when ready.
    blockAndCheckIfStopped(this.clusterStatusTracker);

    // If we are the HMaster then the cluster id should have already been set.
    if (clusterId == null) {
      // Retrieve clusterId. Since cluster status is now up, the ID should
      // already have been set by the HMaster.
      try {
        clusterId = ZKClusterId.readClusterIdZNode(this.zooKeeper);
        if (clusterId == null) {
          this.abort("Cluster ID has not been set");
        }
        LOG.info("ClusterId : " + clusterId);
      } catch (KeeperException e) {
        this.abort("Failed to retrieve Cluster ID", e);
      }
    }

    if (isStopped() || isAborted()) {
      return; // No need for further initialization
    }

    // watch for snapshots and other procedures
    try {
      rspmHost = new RegionServerProcedureManagerHost();
      rspmHost.loadProcedures(conf);
      rspmHost.initialize(this);
    } catch (KeeperException e) {
      this.abort("Failed to reach coordination cluster when creating procedure handler.", e);
    }
  }

  /**
   * Utility method to wait indefinitely on a znode's availability while checking if the region
   * server is shut down
   * @param tracker znode tracker to use
   * @throws IOException          any IO exception, plus the exception thrown if the RS is stopped
   * @throws InterruptedException if the waiting thread is interrupted
   */
  private void blockAndCheckIfStopped(ZKNodeTracker tracker)
    throws IOException, InterruptedException {
    while (tracker.blockUntilAvailable(this.msgInterval, false) == null) {
      if (this.stopped) {
        throw new IOException("Received the shutdown message while waiting.");
      }
    }
  }
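  // Note: the poll above wakes every msgInterval millis purely so the loop can re-check the
  // stopped flag; the znode's appearance itself is delivered via the tracker's ZooKeeper watcher.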

  /** Returns True if the cluster is up. */
  @Override
  public boolean isClusterUp() {
    return this.masterless
      || (this.clusterStatusTracker != null && this.clusterStatusTracker.isClusterUp());
  }

  private void initializeReplicationMarkerChore() {
    boolean replicationMarkerEnabled =
      conf.getBoolean(REPLICATION_MARKER_ENABLED_KEY, REPLICATION_MARKER_ENABLED_DEFAULT);
    // Create the chore only if the replication marker feature is enabled.
    if (replicationMarkerEnabled) {
      int period = conf.getInt(REPLICATION_MARKER_CHORE_DURATION_KEY,
        REPLICATION_MARKER_CHORE_DURATION_DEFAULT);
      replicationMarkerChore = new ReplicationMarkerChore(this, this, period, conf);
    }
  }

  @Override
  public boolean isStopping() {
    return stopping;
  }

  /**
   * The HRegionServer sticks in this loop until closed.
   */
  @Override
  public void run() {
    if (isStopped()) {
      LOG.info("Skipping run; stopped");
      return;
    }
    try {
      // Do pre-registration initializations; zookeeper, lease threads, etc.
      preRegistrationInitialization();
    } catch (Throwable e) {
      abort("Fatal exception during initialization", e);
    }

    try {
      if (!isStopped() && !isAborted()) {
        installShutdownHook();
        // Initialize the RegionServerCoprocessorHost now that our ephemeral
        // node was created, in case any coprocessors want to use ZooKeeper
        this.rsHost = new RegionServerCoprocessorHost(this, this.conf);

        // Try and register with the Master; tell it we are here. Break if the server is stopped
        // or the cluster-up flag is down or hdfs went wacky. Once registered successfully, go
        // ahead and start up all Services. Use RetryCounter to get backoff in case the Master is
        // struggling to come up.
        LOG.debug("About to register with Master.");
        TraceUtil.trace(() -> {
          // Retry forever; back off from the sleeper period up to a five-minute cap.
          RetryCounterFactory rcf =
            new RetryCounterFactory(Integer.MAX_VALUE, this.sleeper.getPeriod(), 1000 * 60 * 5);
          RetryCounter rc = rcf.create();
          while (keepLooping()) {
            RegionServerStartupResponse w = reportForDuty();
            if (w == null) {
              long sleepTime = rc.getBackoffTimeAndIncrementAttempts();
              LOG.warn("reportForDuty failed; sleeping {} ms and then retrying.", sleepTime);
              this.sleeper.sleep(sleepTime);
            } else {
              handleReportForDutyResponse(w);
              break;
            }
          }
        }, "HRegionServer.registerWithMaster");
      }

      if (!isStopped() && isHealthy()) {
        TraceUtil.trace(() -> {
          // start the snapshot handler and other procedure handlers,
          // since the server is ready to run
          if (this.rspmHost != null) {
            this.rspmHost.start();
          }
          // Start the Quota Manager
          if (this.rsQuotaManager != null) {
            rsQuotaManager.start(getRpcServer().getScheduler());
          }
          if (this.rsSpaceQuotaManager != null) {
            this.rsSpaceQuotaManager.start();
          }
        }, "HRegionServer.startup");
      }

      // We registered with the Master. Go into run mode.
      long lastMsg = EnvironmentEdgeManager.currentTime();
      long oldRequestCount = -1;
      // The main run loop.
      while (!isStopped() && isHealthy()) {
        if (!isClusterUp()) {
          if (onlineRegions.isEmpty()) {
            stop("Exiting; cluster shutdown set and not carrying any regions");
          } else if (!this.stopping) {
            this.stopping = true;
            LOG.info("Closing user regions");
            closeUserRegions(isAborted());
          } else {
            boolean allUserRegionsOffline = areAllUserRegionsOffline();
            if (allUserRegionsOffline) {
              // Set stopped if no more write requests to meta tables
              // since last time we went around the loop. Any open
              // meta regions will be closed on our way out.
              if (oldRequestCount == getWriteRequestCount()) {
                stop("Stopped; only catalog regions remaining online");
                break;
              }
              oldRequestCount = getWriteRequestCount();
            } else {
              // Make sure all regions have been closed -- some regions may
              // have not gotten it because we were splitting at the time of
              // the call to closeUserRegions.
              closeUserRegions(this.abortRequested.get());
            }
            LOG.debug("Waiting on " + getOnlineRegionsAsPrintableString());
          }
        }
        long now = EnvironmentEdgeManager.currentTime();
        if ((now - lastMsg) >= msgInterval) {
          tryRegionServerReport(lastMsg, now);
          lastMsg = EnvironmentEdgeManager.currentTime();
        }
        if (!isStopped() && !isAborted()) {
          this.sleeper.sleep();
        }
      } // while
    } catch (Throwable t) {
      if (!rpcServices.checkOOME(t)) {
        String prefix = t instanceof YouAreDeadException ? "" : "Unhandled: ";
        abort(prefix + t.getMessage(), t);
      }
    }

    final Span span = TraceUtil.createSpan("HRegionServer exiting main loop");
    try (Scope ignored = span.makeCurrent()) {
      if (this.leaseManager != null) {
        this.leaseManager.closeAfterLeasesExpire();
      }
      if (this.splitLogWorker != null) {
        splitLogWorker.stop();
      }
      stopInfoServer();
      // Send cache a shutdown.
      if (blockCache != null) {
        blockCache.shutdown();
      }
      if (mobFileCache != null) {
        mobFileCache.shutdown();
      }

      // Send interrupts to wake up threads if sleeping so they notice shutdown.
      // TODO: Should we check they are alive? If OOME, they could have exited already.
      if (this.hMemManager != null) {
        this.hMemManager.stop();
      }
      if (this.cacheFlusher != null) {
        this.cacheFlusher.interruptIfNecessary();
      }
      if (this.compactSplitThread != null) {
        this.compactSplitThread.interruptIfNecessary();
      }

      // Stop the snapshot and other procedure handlers, forcefully killing all running tasks
      if (rspmHost != null) {
        rspmHost.stop(this.abortRequested.get() || this.killed);
      }

      if (this.killed) {
        // Just skip out w/o closing regions. Used when testing.
      } else if (abortRequested.get()) {
        if (this.dataFsOk) {
          closeUserRegions(abortRequested.get()); // Don't leave any open file handles
        }
        LOG.info("aborting server " + this.serverName);
      } else {
        closeUserRegions(abortRequested.get());
        LOG.info("stopping server " + this.serverName);
      }
      regionReplicationBufferManager.stop();
      closeClusterConnection();
      // Close the compactSplit thread before closing meta regions
      if (!this.killed && containsMetaTableRegions()) {
        if (!abortRequested.get() || this.dataFsOk) {
          if (this.compactSplitThread != null) {
            this.compactSplitThread.join();
            this.compactSplitThread = null;
          }
          closeMetaTableRegions(abortRequested.get());
        }
      }

      if (!this.killed && this.dataFsOk) {
        waitOnAllRegionsToClose(abortRequested.get());
        LOG.info("stopping server " + this.serverName + "; all regions closed.");
      }

      // Stop the quota manager
      if (rsQuotaManager != null) {
        rsQuotaManager.stop();
      }
      if (rsSpaceQuotaManager != null) {
        rsSpaceQuotaManager.stop();
        rsSpaceQuotaManager = null;
      }

      // flag may be changed when closing regions throws exception.
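      // On a clean stop shutdownWAL(true) closes the WAL outright; on abort we pass false so the
      // WAL is merely shut down, leaving its files on disk for later splitting/replay.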
      if (this.dataFsOk) {
        shutdownWAL(!abortRequested.get());
      }

      // Make sure the proxy is down.
      if (this.rssStub != null) {
        this.rssStub = null;
      }
      if (this.lockStub != null) {
        this.lockStub = null;
      }
      if (this.rpcClient != null) {
        this.rpcClient.close();
      }
      if (this.leaseManager != null) {
        this.leaseManager.close();
      }
      if (this.pauseMonitor != null) {
        this.pauseMonitor.stop();
      }

      if (!killed) {
        stopServiceThreads();
      }

      if (this.rpcServices != null) {
        this.rpcServices.stop();
      }

      try {
        deleteMyEphemeralNode();
      } catch (KeeperException.NoNodeException nn) {
        // pass
      } catch (KeeperException e) {
        LOG.warn("Failed deleting my ephemeral node", e);
      }
      // We may have failed to delete the znode at the previous step, but
      // we delete the file anyway: a second attempt to delete the znode is likely to fail again.
      ZNodeClearer.deleteMyEphemeralNodeOnDisk();

      closeZooKeeper();
      closeTableDescriptors();
      LOG.info("Exiting; stopping=" + this.serverName + "; zookeeper connection closed.");
      span.setStatus(StatusCode.OK);
    } finally {
      span.end();
    }
  }

  private boolean containsMetaTableRegions() {
    return onlineRegions.containsKey(RegionInfoBuilder.FIRST_META_REGIONINFO.getEncodedName());
  }

  private boolean areAllUserRegionsOffline() {
    if (getNumberOfOnlineRegions() > 2) {
      return false;
    }
    boolean allUserRegionsOffline = true;
    for (Map.Entry<String, HRegion> e : this.onlineRegions.entrySet()) {
      if (!e.getValue().getRegionInfo().isMetaRegion()) {
        allUserRegionsOffline = false;
        break;
      }
    }
    return allUserRegionsOffline;
  }

  /** Returns Current write count for all online regions. */
  private long getWriteRequestCount() {
    long writeCount = 0;
    for (Map.Entry<String, HRegion> e : this.onlineRegions.entrySet()) {
      writeCount += e.getValue().getWriteRequestsCount();
    }
    return writeCount;
  }

  @InterfaceAudience.Private
  protected void tryRegionServerReport(long reportStartTime, long reportEndTime)
    throws IOException {
    RegionServerStatusService.BlockingInterface rss = rssStub;
    if (rss == null) {
      // the current server could be stopping.
      return;
    }
    ClusterStatusProtos.ServerLoad sl = buildServerLoad(reportStartTime, reportEndTime);
    final Span span = TraceUtil.createSpan("HRegionServer.tryRegionServerReport");
    try (Scope ignored = span.makeCurrent()) {
      RegionServerReportRequest.Builder request = RegionServerReportRequest.newBuilder();
      request.setServer(ProtobufUtil.toServerName(this.serverName));
      request.setLoad(sl);
      rss.regionServerReport(null, request.build());
      span.setStatus(StatusCode.OK);
    } catch (ServiceException se) {
      IOException ioe = ProtobufUtil.getRemoteException(se);
      if (ioe instanceof YouAreDeadException) {
        // This will be caught and handled as a fatal error in run()
        TraceUtil.setError(span, ioe);
        throw ioe;
      }
      if (rssStub == rss) {
        rssStub = null;
      }
      TraceUtil.setError(span, se);
      // Couldn't connect to the master, get location from zk and reconnect
      // Method blocks until new master is found or we are stopped
      createRegionServerStatusStub(true);
    } finally {
      span.end();
    }
  }
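  // Pattern note: rssStub is nulled only if it still references the stub this call failed on, so
  // a stub re-created concurrently by another caller is not thrown away.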

  /**
   * Reports the given map of Regions and their size on the filesystem to the active Master.
   * @param regionSizeStore The store containing region sizes
   * @return false if FileSystemUtilizationChore should pause reporting to the master, true
   *         otherwise
   */
  public boolean reportRegionSizesForQuotas(RegionSizeStore regionSizeStore) {
    RegionServerStatusService.BlockingInterface rss = rssStub;
    if (rss == null) {
      // the current server could be stopping.
      LOG.trace("Skipping Region size report to HMaster as stub is null");
      return true;
    }
    try {
      buildReportAndSend(rss, regionSizeStore);
    } catch (ServiceException se) {
      IOException ioe = ProtobufUtil.getRemoteException(se);
      if (ioe instanceof PleaseHoldException) {
        LOG.trace("Failed to report region sizes to Master because it is initializing."
          + " This will be retried.", ioe);
        // The Master is coming up. Will retry the report later. Avoid re-creating the stub.
        return true;
      }
      if (rssStub == rss) {
        rssStub = null;
      }
      createRegionServerStatusStub(true);
      if (ioe instanceof DoNotRetryIOException) {
        DoNotRetryIOException doNotRetryEx = (DoNotRetryIOException) ioe;
        if (doNotRetryEx.getCause() != null) {
          Throwable t = doNotRetryEx.getCause();
          if (t instanceof UnsupportedOperationException) {
            LOG.debug("master doesn't support ReportRegionSpaceUse, pause before retrying");
            return false;
          }
        }
      }
      LOG.debug("Failed to report region sizes to Master. This will be retried.", ioe);
    }
    return true;
  }

  /**
   * Builds the region size report and sends it to the master. Upon successful sending of the
   * report, the region sizes that were sent are marked as sent.
   * @param rss             The stub to send to the Master
   * @param regionSizeStore The store containing region sizes
   */
  private void buildReportAndSend(RegionServerStatusService.BlockingInterface rss,
    RegionSizeStore regionSizeStore) throws ServiceException {
    RegionSpaceUseReportRequest request =
      buildRegionSpaceUseReportRequest(Objects.requireNonNull(regionSizeStore));
    rss.reportRegionSpaceUse(null, request);
    // Record the number of size reports sent
    if (metricsRegionServer != null) {
      metricsRegionServer.incrementNumRegionSizeReportsSent(regionSizeStore.size());
    }
  }

  /**
   * Builds a {@link RegionSpaceUseReportRequest} protobuf message from the region size map.
   * @param regionSizes The size in bytes of regions
   * @return The corresponding protocol buffer message.
   */
  RegionSpaceUseReportRequest buildRegionSpaceUseReportRequest(RegionSizeStore regionSizes) {
    RegionSpaceUseReportRequest.Builder request = RegionSpaceUseReportRequest.newBuilder();
    for (Entry<RegionInfo, RegionSize> entry : regionSizes) {
      request.addSpaceUse(convertRegionSize(entry.getKey(), entry.getValue().getSize()));
    }
    return request.build();
  }

  /**
   * Converts a pair of {@link RegionInfo} and {@code long} into a {@link RegionSpaceUse} protobuf
   * message.
   * @param regionInfo  The RegionInfo
   * @param sizeInBytes The size in bytes of the Region
   * @return The protocol buffer
   */
  RegionSpaceUse convertRegionSize(RegionInfo regionInfo, Long sizeInBytes) {
    return RegionSpaceUse.newBuilder()
      .setRegionInfo(ProtobufUtil.toRegionInfo(Objects.requireNonNull(regionInfo)))
      .setRegionSize(Objects.requireNonNull(sizeInBytes)).build();
  }
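  // Illustrative: a report covering two regions of 1 MiB and 3 MiB carries two RegionSpaceUse
  // entries whose region sizes are 1048576 and 3145728 bytes.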

  private ClusterStatusProtos.ServerLoad buildServerLoad(long reportStartTime, long reportEndTime)
    throws IOException {
    // We're getting the MetricsRegionServerWrapper here because the wrapper computes requests
    // per second and other metrics. As long as metrics are part of ServerLoad it's best to use
    // the wrapper to compute those numbers in one place.
    // In the long term most of these should be moved off of ServerLoad and the heart beat.
    // Instead they should be stored in an HBase table so that external visibility into HBase is
    // improved; additionally the load balancer will be able to take advantage of a more complete
    // history.
    MetricsRegionServerWrapper regionServerWrapper = metricsRegionServer.getRegionServerWrapper();
    Collection<HRegion> regions = getOnlineRegionsLocalContext();
    long usedMemory = -1L;
    long maxMemory = -1L;
    final MemoryUsage usage = MemorySizeUtil.safeGetHeapMemoryUsage();
    if (usage != null) {
      usedMemory = usage.getUsed();
      maxMemory = usage.getMax();
    }

    ClusterStatusProtos.ServerLoad.Builder serverLoad = ClusterStatusProtos.ServerLoad.newBuilder();
    serverLoad.setNumberOfRequests((int) regionServerWrapper.getRequestsPerSecond());
    serverLoad.setTotalNumberOfRequests(regionServerWrapper.getTotalRequestCount());
    // Heap sizes are reported in MB; usedMemory/maxMemory above are in bytes.
    serverLoad.setUsedHeapMB((int) (usedMemory / 1024 / 1024));
    serverLoad.setMaxHeapMB((int) (maxMemory / 1024 / 1024));
    serverLoad.setReadRequestsCount(this.metricsRegionServerImpl.getReadRequestsCount());
    serverLoad.setWriteRequestsCount(this.metricsRegionServerImpl.getWriteRequestsCount());
    Set<String> coprocessors = getWAL(null).getCoprocessorHost().getCoprocessors();
    Coprocessor.Builder coprocessorBuilder = Coprocessor.newBuilder();
    for (String coprocessor : coprocessors) {
      serverLoad.addCoprocessors(coprocessorBuilder.setName(coprocessor).build());
    }
    RegionLoad.Builder regionLoadBldr = RegionLoad.newBuilder();
    RegionSpecifier.Builder regionSpecifier = RegionSpecifier.newBuilder();
    for (HRegion region : regions) {
      if (region.getCoprocessorHost() != null) {
        Set<String> regionCoprocessors = region.getCoprocessorHost().getCoprocessors();
        for (String regionCoprocessor : regionCoprocessors) {
          serverLoad.addCoprocessors(coprocessorBuilder.setName(regionCoprocessor).build());
        }
      }
      serverLoad.addRegionLoads(createRegionLoad(region, regionLoadBldr, regionSpecifier));
      for (String coprocessor : getWAL(region.getRegionInfo()).getCoprocessorHost()
        .getCoprocessors()) {
        serverLoad.addCoprocessors(coprocessorBuilder.setName(coprocessor).build());
      }
    }

    getBlockCache().ifPresent(cache -> {
      cache.getRegionCachedInfo().ifPresent(regionCachedInfo -> {
        regionCachedInfo.forEach((regionName, prefetchSize) -> {
          serverLoad.putRegionCachedInfo(regionName, roundSize(prefetchSize, unitMB));
        });
      });
    });
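    // roundSize above keeps a small-but-nonempty cached size from reporting as 0 MB; see the
    // rounding note above roundSize at the end of this section (HBASE-26340).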
    serverLoad.setReportStartTime(reportStartTime);
    serverLoad.setReportEndTime(reportEndTime);
    if (this.infoServer != null) {
      serverLoad.setInfoServerPort(this.infoServer.getPort());
    } else {
      serverLoad.setInfoServerPort(-1);
    }
    MetricsUserAggregateSource userSource =
      metricsRegionServer.getMetricsUserAggregate().getSource();
    if (userSource != null) {
      Map<String, MetricsUserSource> userMetricMap = userSource.getUserSources();
      for (Entry<String, MetricsUserSource> entry : userMetricMap.entrySet()) {
        serverLoad.addUserLoads(createUserLoad(entry.getKey(), entry.getValue()));
      }
    }

    if (sameReplicationSourceAndSink && replicationSourceHandler != null) {
      // always refresh first to get the latest value
      ReplicationLoad rLoad = replicationSourceHandler.refreshAndGetReplicationLoad();
      if (rLoad != null) {
        serverLoad.setReplLoadSink(rLoad.getReplicationLoadSink());
        for (ClusterStatusProtos.ReplicationLoadSource rLS : rLoad
          .getReplicationLoadSourceEntries()) {
          serverLoad.addReplLoadSource(rLS);
        }
      }
    } else {
      if (replicationSourceHandler != null) {
        ReplicationLoad rLoad = replicationSourceHandler.refreshAndGetReplicationLoad();
        if (rLoad != null) {
          for (ClusterStatusProtos.ReplicationLoadSource rLS : rLoad
            .getReplicationLoadSourceEntries()) {
            serverLoad.addReplLoadSource(rLS);
          }
        }
      }
      if (replicationSinkHandler != null) {
        ReplicationLoad rLoad = replicationSinkHandler.refreshAndGetReplicationLoad();
        if (rLoad != null) {
          serverLoad.setReplLoadSink(rLoad.getReplicationLoadSink());
        }
      }
    }

    TaskMonitor.get().getTasks().forEach(task -> serverLoad.addTasks(ClusterStatusProtos.ServerTask
      .newBuilder().setDescription(task.getDescription())
      .setStatus(task.getStatus() != null ? task.getStatus() : "")
      .setState(ClusterStatusProtos.ServerTask.State.valueOf(task.getState().name()))
      .setStartTime(task.getStartTime()).setCompletionTime(task.getCompletionTimestamp()).build()));

    return serverLoad.build();
  }

  private String getOnlineRegionsAsPrintableString() {
    StringBuilder sb = new StringBuilder();
    for (Region r : this.onlineRegions.values()) {
      if (sb.length() > 0) {
        sb.append(", ");
      }
      sb.append(r.getRegionInfo().getEncodedName());
    }
    return sb.toString();
  }

  /**
   * Wait on regions to close.
   */
  private void waitOnAllRegionsToClose(final boolean abort) {
    // Wait till all regions are closed before going out.
    int lastCount = -1;
    long previousLogTime = 0;
    Set<String> closedRegions = new HashSet<>();
    boolean interrupted = false;
    try {
      while (!onlineRegions.isEmpty()) {
        int count = getNumberOfOnlineRegions();
        // Only print a message if the count of regions has changed.
        if (count != lastCount) {
          // Log every second at most
          if (EnvironmentEdgeManager.currentTime() > (previousLogTime + 1000)) {
            previousLogTime = EnvironmentEdgeManager.currentTime();
            lastCount = count;
            LOG.info("Waiting on " + count + " regions to close");
            // Only print out regions still closing if a small number, else we
            // will swamp the log.
            if (count < 10 && LOG.isDebugEnabled()) {
              LOG.debug("Online Regions=" + this.onlineRegions);
            }
          }
        }
        // Ensure all user regions have been sent a close. Use this to
        // protect against the case where an open comes in after we start the
        // iterator of onlineRegions to close all user regions.
        for (Map.Entry<String, HRegion> e : this.onlineRegions.entrySet()) {
          RegionInfo hri = e.getValue().getRegionInfo();
          if (
            !this.regionsInTransitionInRS.containsKey(hri.getEncodedNameAsBytes())
              && !closedRegions.contains(hri.getEncodedName())
          ) {
            closedRegions.add(hri.getEncodedName());
            // Don't update zk with this close transition; pass false.
            closeRegionIgnoreErrors(hri, abort);
          }
        }
        // No regions in RIT, we could stop waiting now.
        if (this.regionsInTransitionInRS.isEmpty()) {
          if (!onlineRegions.isEmpty()) {
            LOG.info("Exiting even though online regions are not empty,"
              + " because some regions failed closing");
          }
          break;
        } else {
          LOG.debug("Waiting on {}", this.regionsInTransitionInRS.keySet().stream()
            .map(e -> Bytes.toString(e)).collect(Collectors.joining(", ")));
        }
        if (sleepInterrupted(200)) {
          interrupted = true;
        }
      }
    } finally {
      if (interrupted) {
        Thread.currentThread().interrupt();
      }
    }
  }

  private static boolean sleepInterrupted(long millis) {
    boolean interrupted = false;
    try {
      Thread.sleep(millis);
    } catch (InterruptedException e) {
      LOG.warn("Interrupted while sleeping");
      interrupted = true;
    }
    return interrupted;
  }

  private void shutdownWAL(final boolean close) {
    if (this.walFactory != null) {
      try {
        if (close) {
          walFactory.close();
        } else {
          walFactory.shutdown();
        }
      } catch (Throwable e) {
        e = e instanceof RemoteException ? ((RemoteException) e).unwrapRemoteException() : e;
        LOG.error("Shutdown / close of WAL failed: " + e);
        LOG.debug("Shutdown / close exception details:", e);
      }
    }
  }

  /**
   * Run init. Sets up wal and starts up all server threads.
   * @param c Extra configuration.
   */
  protected void handleReportForDutyResponse(final RegionServerStartupResponse c)
    throws IOException {
    try {
      boolean updateRootDir = false;
      for (NameStringPair e : c.getMapEntriesList()) {
        String key = e.getName();
        // The hostname the master sees us as.
        if (key.equals(HConstants.KEY_FOR_HOSTNAME_SEEN_BY_MASTER)) {
          String hostnameFromMasterPOV = e.getValue();
          this.serverName = ServerName.valueOf(hostnameFromMasterPOV,
            rpcServices.getSocketAddress().getPort(), this.startcode);
          String expectedHostName = rpcServices.getSocketAddress().getHostName();
          // If the master's use-ip setting is enabled, the region server's use-ip is enabled by
          // default even if it is explicitly disabled, so we use the region server's IP to
          // compare with the hostname passed by the master; see HBASE-27304 for details.
          if (
            StringUtils.isBlank(useThisHostnameInstead) && getActiveMaster().isPresent()
              && InetAddresses.isInetAddress(getActiveMaster().get().getHostname())
          ) {
            expectedHostName = rpcServices.getSocketAddress().getAddress().getHostAddress();
          }
          boolean isHostnameConsist = StringUtils.isBlank(useThisHostnameInstead)
            ? hostnameFromMasterPOV.equals(expectedHostName)
            : hostnameFromMasterPOV.equals(useThisHostnameInstead);
          if (!isHostnameConsist) {
            String msg = "Master passed us a different hostname to use; was="
              + (StringUtils.isBlank(useThisHostnameInstead)
                ? expectedHostName
                : this.useThisHostnameInstead)
              + ", but now=" + hostnameFromMasterPOV;
            LOG.error(msg);
            throw new IOException(msg);
          }
          continue;
        }

        String value = e.getValue();
        if (key.equals(HConstants.HBASE_DIR)) {
          if (value != null && !value.equals(conf.get(HConstants.HBASE_DIR))) {
            updateRootDir = true;
          }
        }

        if (LOG.isDebugEnabled()) {
          LOG.debug("Config from master: " + key + "=" + value);
        }
        this.conf.set(key, value);
      }
      // Set our ephemeral znode up in zookeeper now we have a name.
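      // The znode is ephemeral, so it is tied to this server's ZooKeeper session: if this
      // process dies, the node disappears and the master learns that we are gone.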
      createMyEphemeralNode();

      if (updateRootDir) {
        // initialize file system by the config fs.defaultFS and hbase.rootdir from master
        initializeFileSystem();
      }

      // hack! Maps DFSClient => RegionServer for logs. HDFS made this
      // config param for task trackers, but we can piggyback off of it.
      if (this.conf.get("mapreduce.task.attempt.id") == null) {
        this.conf.set("mapreduce.task.attempt.id", "hb_rs_" + this.serverName.toString());
      }

      // Save it in a file; this will allow us to see, on restart, whether we crashed.
      ZNodeClearer.writeMyEphemeralNodeOnDisk(getMyEphemeralNodePath());

      // This call sets up an initialized replication and WAL. Later we start it up.
      setupWALAndReplication();
      // Init in here rather than in constructor after thread name has been set
      final MetricsTable metricsTable =
        new MetricsTable(new MetricsTableWrapperAggregateImpl(this));
      this.metricsRegionServerImpl = new MetricsRegionServerWrapperImpl(this);
      this.metricsRegionServer =
        new MetricsRegionServer(metricsRegionServerImpl, conf, metricsTable);
      // Now that we have a metrics source, start the pause monitor
      this.pauseMonitor = new JvmPauseMonitor(conf, getMetrics().getMetricsSource());
      pauseMonitor.start();

      // There is a rare case where we do NOT want services to start. Check config.
      if (getConfiguration().getBoolean("hbase.regionserver.workers", true)) {
        startServices();
      }
      // In here we start up the replication service. Above we initialized it. TODO: reconcile
      // or make sense of it.
      startReplicationService();

      // Set up ZK
      LOG.info("Serving as " + this.serverName + ", RpcServer on " + rpcServices.getSocketAddress()
        + ", sessionid=0x"
        + Long.toHexString(this.zooKeeper.getRecoverableZooKeeper().getSessionId()));

      // Wake up anyone waiting for this server to come online
      synchronized (online) {
        online.set(true);
        online.notifyAll();
      }
    } catch (Throwable e) {
      stop("Failed initialization");
      throw convertThrowableToIOE(cleanup(e, "Failed init"), "Region server startup failed");
    } finally {
      sleeper.skipSleepCycle();
    }
  }

  private void startHeapMemoryManager() {
    if (this.blockCache != null) {
      this.hMemManager =
        new HeapMemoryManager(this.blockCache, this.cacheFlusher, this, regionServerAccounting);
      this.hMemManager.start(getChoreService());
    }
  }

  private void createMyEphemeralNode() throws KeeperException {
    RegionServerInfo.Builder rsInfo = RegionServerInfo.newBuilder();
    rsInfo.setInfoPort(infoServer != null ? infoServer.getPort() : -1);
    rsInfo.setVersionInfo(ProtobufUtil.getVersionInfo());
    byte[] data = ProtobufUtil.prependPBMagic(rsInfo.build().toByteArray());
    ZKUtil.createEphemeralNodeAndWatch(this.zooKeeper, getMyEphemeralNodePath(), data);
  }

  private void deleteMyEphemeralNode() throws KeeperException {
    ZKUtil.deleteNode(this.zooKeeper, getMyEphemeralNodePath());
  }

  @Override
  public RegionServerAccounting getRegionServerAccounting() {
    return regionServerAccounting;
  }

  // Round the size with KB or MB.
  // A trick here is that if the sizeInBytes is less than sizeUnit, we will round the size to 1
  // instead of 0 if it is not 0, to avoid some schedulers thinking the region has no data. See
  // HBASE-26340 for more details on why this is important.
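  // Examples (with sizeUnit = unitMB, i.e. one megabyte in bytes, as the callers above use it):
  // roundSize(0, unitMB) == 0, roundSize(1, unitMB) == 1 (a non-empty region never reports 0),
  // roundSize(5 * 1024 * 1024, unitMB) == 5; results are capped at Integer.MAX_VALUE.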
  private static int roundSize(long sizeInByte, int sizeUnit) {
    if (sizeInByte == 0) {
      return 0;
    } else if (sizeInByte < sizeUnit) {
      return 1;
    } else {
      return (int) Math.min(sizeInByte / sizeUnit, Integer.MAX_VALUE);
    }
  }

  /**
   * @param r               Region to get RegionLoad for.
   * @param regionLoadBldr  the RegionLoad.Builder, can be null
   * @param regionSpecifier the RegionSpecifier.Builder, can be null
   * @return RegionLoad instance.
   */
  RegionLoad createRegionLoad(final HRegion r, RegionLoad.Builder regionLoadBldr,
    RegionSpecifier.Builder regionSpecifier) throws IOException {
    byte[] name = r.getRegionInfo().getRegionName();
    String regionEncodedName = r.getRegionInfo().getEncodedName();
    int stores = 0;
    int storefiles = 0;
    int storeRefCount = 0;
    int maxCompactedStoreFileRefCount = 0;
    long storeUncompressedSize = 0L;
    long storefileSize = 0L;
    long storefileIndexSize = 0L;
    long rootLevelIndexSize = 0L;
    long totalStaticIndexSize = 0L;
    long totalStaticBloomSize = 0L;
    long totalCompactingKVs = 0L;
    long currentCompactedKVs = 0L;
    long totalRegionSize = 0L;
    List<HStore> storeList = r.getStores();
    stores += storeList.size();
    for (HStore store : storeList) {
      storefiles += store.getStorefilesCount();
      int currentStoreRefCount = store.getStoreRefCount();
      storeRefCount += currentStoreRefCount;
      int currentMaxCompactedStoreFileRefCount = store.getMaxCompactedStoreFileRefCount();
      maxCompactedStoreFileRefCount =
        Math.max(maxCompactedStoreFileRefCount, currentMaxCompactedStoreFileRefCount);
      storeUncompressedSize += store.getStoreSizeUncompressed();
      storefileSize += store.getStorefilesSize();
      totalRegionSize += store.getHFilesSize();
      // TODO: storefileIndexSizeKB is same with rootLevelIndexSizeKB?
      storefileIndexSize += store.getStorefilesRootLevelIndexSize();
      CompactionProgress progress = store.getCompactionProgress();
      if (progress != null) {
        totalCompactingKVs += progress.getTotalCompactingKVs();
        currentCompactedKVs += progress.currentCompactedKVs;
      }
      rootLevelIndexSize += store.getStorefilesRootLevelIndexSize();
      totalStaticIndexSize += store.getTotalStaticIndexSize();
      totalStaticBloomSize += store.getTotalStaticBloomSize();
    }

    int memstoreSizeMB = roundSize(r.getMemStoreDataSize(), unitMB);
    int storeUncompressedSizeMB = roundSize(storeUncompressedSize, unitMB);
    int storefileSizeMB = roundSize(storefileSize, unitMB);
    int storefileIndexSizeKB = roundSize(storefileIndexSize, unitKB);
    int rootLevelIndexSizeKB = roundSize(rootLevelIndexSize, unitKB);
    int totalStaticIndexSizeKB = roundSize(totalStaticIndexSize, unitKB);
    int totalStaticBloomSizeKB = roundSize(totalStaticBloomSize, unitKB);
    int regionSizeMB = roundSize(totalRegionSize, unitMB);
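    // Fraction of this region's on-disk data (in MB buckets) currently held in the block cache,
    // per the cache's region-level prefetch accounting.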
    final MutableFloat currentRegionCachedRatio = new MutableFloat(0.0f);
    getBlockCache().ifPresent(bc -> {
      bc.getRegionCachedInfo().ifPresent(regionCachedInfo -> {
        if (regionCachedInfo.containsKey(regionEncodedName)) {
          currentRegionCachedRatio.setValue(regionSizeMB == 0
            ? 0.0f
            : (float) roundSize(regionCachedInfo.get(regionEncodedName), unitMB) / regionSizeMB);
        }
      });
    });
    final MutableFloat currentRegionColdDataRatio = new MutableFloat(0.0f);
    if (DataTieringManager.getInstance() != null) {
      DataTieringManager.getInstance().getRegionColdDataSize().computeIfPresent(regionEncodedName,
        (k, v) -> {
          int coldSizeMB = roundSize(v.getSecond(), unitMB);
          currentRegionColdDataRatio
            .setValue(regionSizeMB == 0 ? 0.0f : (float) coldSizeMB / regionSizeMB);
          return v;
        });
    }

    HDFSBlocksDistribution hdfsBd = r.getHDFSBlocksDistribution();
    float dataLocality = hdfsBd.getBlockLocalityIndex(serverName.getHostname());
    float dataLocalityForSsd = hdfsBd.getBlockLocalityIndexForSsd(serverName.getHostname());
    long blocksTotalWeight = hdfsBd.getUniqueBlocksTotalWeight();
    long blocksLocalWeight = hdfsBd.getBlocksLocalWeight(serverName.getHostname());
    long blocksLocalWithSsdWeight = hdfsBd.getBlocksLocalWithSsdWeight(serverName.getHostname());
    if (regionLoadBldr == null) {
      regionLoadBldr = RegionLoad.newBuilder();
    }
    if (regionSpecifier == null) {
      regionSpecifier = RegionSpecifier.newBuilder();
    }

    regionSpecifier.setType(RegionSpecifierType.REGION_NAME);
    regionSpecifier.setValue(UnsafeByteOperations.unsafeWrap(name));
    regionLoadBldr.setRegionSpecifier(regionSpecifier.build()).setStores(stores)
      .setStorefiles(storefiles).setStoreRefCount(storeRefCount)
      .setMaxCompactedStoreFileRefCount(maxCompactedStoreFileRefCount)
      .setStoreUncompressedSizeMB(storeUncompressedSizeMB).setStorefileSizeMB(storefileSizeMB)
      .setMemStoreSizeMB(memstoreSizeMB).setStorefileIndexSizeKB(storefileIndexSizeKB)
      .setRootIndexSizeKB(rootLevelIndexSizeKB).setTotalStaticIndexSizeKB(totalStaticIndexSizeKB)
      .setTotalStaticBloomSizeKB(totalStaticBloomSizeKB)
      .setReadRequestsCount(r.getReadRequestsCount()).setCpRequestsCount(r.getCpRequestsCount())
      .setFilteredReadRequestsCount(r.getFilteredReadRequestsCount())
      .setWriteRequestsCount(r.getWriteRequestsCount()).setTotalCompactingKVs(totalCompactingKVs)
      .setCurrentCompactedKVs(currentCompactedKVs).setDataLocality(dataLocality)
      .setDataLocalityForSsd(dataLocalityForSsd).setBlocksLocalWeight(blocksLocalWeight)
      .setBlocksLocalWithSsdWeight(blocksLocalWithSsdWeight).setBlocksTotalWeight(blocksTotalWeight)
      .setCompactionState(ProtobufUtil.createCompactionStateForRegionLoad(r.getCompactionState()))
      .setLastMajorCompactionTs(r.getOldestHfileTs(true)).setRegionSizeMB(regionSizeMB)
      .setCurrentRegionCachedRatio(currentRegionCachedRatio.floatValue())
      .setCurrentRegionColdDataRatio(currentRegionColdDataRatio.floatValue());
    r.setCompleteSequenceId(regionLoadBldr);
    return regionLoadBldr.build();
  }

  private UserLoad createUserLoad(String user, MetricsUserSource userSource) {
    UserLoad.Builder userLoadBldr = UserLoad.newBuilder();
    userLoadBldr.setUserName(user);
    userSource.getClientMetrics().values().stream()
      .map(clientMetrics -> ClusterStatusProtos.ClientMetrics.newBuilder()
        .setHostName(clientMetrics.getHostName())
        .setWriteRequestsCount(clientMetrics.getWriteRequestsCount())
        .setFilteredRequestsCount(clientMetrics.getFilteredReadRequests())
        .setReadRequestsCount(clientMetrics.getReadRequestsCount()).build())
      .forEach(userLoadBldr::addClientMetrics);
    return userLoadBldr.build();
  }
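
  /**
   * Build a RegionLoad for the region with the given encoded name, or return null if that region
   * is no longer online on this server.
   */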
  public RegionLoad createRegionLoad(final String encodedRegionName) throws IOException {
    HRegion r = onlineRegions.get(encodedRegionName);
    return r != null ? createRegionLoad(r, null, null) : null;
  }

  /**
   * Inner class that runs on a long period checking if regions need compaction.
   */
  private static class CompactionChecker extends ScheduledChore {
    private final HRegionServer instance;
    private final int majorCompactPriority;
    private final static int DEFAULT_PRIORITY = Integer.MAX_VALUE;
    // Iteration is 1-based rather than 0-based so we don't check for compaction
    // immediately upon region server startup
    private long iteration = 1;

    CompactionChecker(final HRegionServer h, final int sleepTime, final Stoppable stopper) {
      super("CompactionChecker", stopper, sleepTime);
      this.instance = h;
      LOG.info(this.getName() + " runs every " + Duration.ofMillis(sleepTime));

      /*
       * MajorCompactPriority is configurable. If not set, the compaction will use default
       * priority.
       */
      this.majorCompactPriority = this.instance.conf
        .getInt("hbase.regionserver.compactionChecker.majorCompactPriority", DEFAULT_PRIORITY);
    }

    @Override
    protected void chore() {
      for (HRegion hr : this.instance.onlineRegions.values()) {
        // If region is read only or compaction is disabled at table level, there's no need to
        // iterate through region's stores
        if (hr == null || hr.isReadOnly() || !hr.getTableDescriptor().isCompactionEnabled()) {
          continue;
        }

        for (HStore s : hr.stores.values()) {
          try {
            long multiplier = s.getCompactionCheckMultiplier();
            assert multiplier > 0;
            if (iteration % multiplier != 0) {
              continue;
            }
            if (s.needsCompaction()) {
              // Queue a compaction. Will recognize if major is needed.
              this.instance.compactSplitThread.requestSystemCompaction(hr, s,
                getName() + " requests compaction");
            } else if (s.shouldPerformMajorCompaction()) {
              s.triggerMajorCompaction();
              if (
                majorCompactPriority == DEFAULT_PRIORITY
                  || majorCompactPriority > hr.getCompactPriority()
              ) {
                this.instance.compactSplitThread.requestCompaction(hr, s,
                  getName() + " requests major compaction; use default priority", Store.NO_PRIORITY,
                  CompactionLifeCycleTracker.DUMMY, null);
              } else {
                this.instance.compactSplitThread.requestCompaction(hr, s,
                  getName() + " requests major compaction; use configured priority",
                  this.majorCompactPriority, CompactionLifeCycleTracker.DUMMY, null);
              }
            }
          } catch (IOException e) {
            LOG.warn("Failed major compaction check on " + hr, e);
          }
        }
      }
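      // Advance the 1-based iteration counter, wrapping back to 0 at Long.MAX_VALUE so the
      // per-store multiplier check above never sees an overflowed negative value.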
      iteration = (iteration == Long.MAX_VALUE) ? 0 : (iteration + 1);
    }
  }

  private static class PeriodicMemStoreFlusher extends ScheduledChore {
    private final HRegionServer server;
    private final static int RANGE_OF_DELAY = 5 * 60; // 5 min in seconds
    private final static int MIN_DELAY_TIME = 0; // milliseconds
    private final long rangeOfDelayMs;

    PeriodicMemStoreFlusher(int cacheFlushInterval, final HRegionServer server) {
      super("MemstoreFlusherChore", server, cacheFlushInterval);
      this.server = server;

      final long configuredRangeOfDelay = server.getConfiguration()
        .getInt("hbase.regionserver.periodicmemstoreflusher.rangeofdelayseconds", RANGE_OF_DELAY);
      this.rangeOfDelayMs = TimeUnit.SECONDS.toMillis(configuredRangeOfDelay);
    }

    @Override
    protected void chore() {
      final StringBuilder whyFlush = new StringBuilder();
      for (HRegion r : this.server.onlineRegions.values()) {
        if (r == null) {
          continue;
        }
        if (r.shouldFlush(whyFlush)) {
          FlushRequester requester = server.getFlushRequester();
          if (requester != null) {
            long delay = ThreadLocalRandom.current().nextLong(rangeOfDelayMs) + MIN_DELAY_TIME;
            // Throttle the flushes by putting a delay. If we don't throttle, and there
            // is a balanced write-load on the regions in a table, we might end up
            // overwhelming the filesystem with too many flushes at once.
            if (requester.requestDelayedFlush(r, delay)) {
              LOG.info("{} requesting flush of {} because {} after random delay {} ms", getName(),
                r.getRegionInfo().getRegionNameAsString(), whyFlush.toString(), delay);
            }
          }
        }
      }
    }
  }

  /**
   * Report the status of the server. A server is online once all the startup is completed (setting
   * up filesystem, starting executorService threads, etc.). This method is designed mostly to be
   * useful in tests.
   * @return true if online, false if not.
   */
  public boolean isOnline() {
    return online.get();
  }

  /**
   * Setup WAL log and replication if enabled. Replication setup is done in here because it wants
   * to be hooked up to WAL.
   */
  private void setupWALAndReplication() throws IOException {
    WALFactory factory = new WALFactory(conf, serverName, this);
    // TODO Replication make assumptions here based on the default filesystem impl
    Path oldLogDir = new Path(walRootDir, HConstants.HREGION_OLDLOGDIR_NAME);
    String logName = AbstractFSWALProvider.getWALDirectoryName(this.serverName.toString());

    Path logDir = new Path(walRootDir, logName);
    LOG.debug("logDir={}", logDir);
    if (this.walFs.exists(logDir)) {
      throw new RegionServerRunningException(
        "Region server has already created directory at " + this.serverName.toString());
    }
    // Create the WAL directory here, and never create it again anywhere else. This is
    // important to make sure that our fencing takes effect. See HBASE-29797 for more details.
    if (!this.walFs.mkdirs(logDir)) {
      throw new IOException("Can not create wal directory " + logDir);
    }
    // Instantiate replication if replication enabled. Pass it the log directories.
    createNewReplicationInstance(conf, this, this.walFs, logDir, oldLogDir, factory);

    WALActionsListener walEventListener = getWALEventTrackerListener(conf);
    if (walEventListener != null && factory.getWALProvider() != null) {
      factory.getWALProvider().addWALActionsListener(walEventListener);
    }
    this.walFactory = factory;
  }

  private WALActionsListener getWALEventTrackerListener(Configuration conf) {
    if (conf.getBoolean(WAL_EVENT_TRACKER_ENABLED_KEY, WAL_EVENT_TRACKER_ENABLED_DEFAULT)) {
      WALEventTrackerListener listener =
        new WALEventTrackerListener(conf, getNamedQueueRecorder(), getServerName());
      return listener;
    }
    return null;
  }

  /**
   * Start up replication source and sink handlers.
   */
  private void startReplicationService() throws IOException {
    if (sameReplicationSourceAndSink && this.replicationSourceHandler != null) {
      this.replicationSourceHandler.startReplicationService();
    } else {
      if (this.replicationSourceHandler != null) {
        this.replicationSourceHandler.startReplicationService();
      }
      if (this.replicationSinkHandler != null) {
        this.replicationSinkHandler.startReplicationService();
      }
    }
  }

  /** Returns Master address tracker instance. */
  public MasterAddressTracker getMasterAddressTracker() {
    return this.masterAddressTracker;
  }

  /**
   * Start maintenance threads, server, worker and lease checker threads. Start all the threads we
   * need to run. This is called after we've successfully registered with the Master. Install an
   * UncaughtExceptionHandler that calls abort of RegionServer if we get an unhandled exception. We
   * cannot set the handler on all threads. Server's internal Listener thread is off limits. For
   * Server, if an OOME, it waits a while then retries. Meantime, a flush or a compaction that
   * tries to run should trigger the same critical condition and the shutdown will run. On its way
   * out, this server will shut down Server. Leases are sort of in between: they run an internal
   * thread that, while it inherits from Chore, keeps its own internal stop mechanism, so it needs
   * to be stopped by this hosting server. Worker logs the exception and exits.
   */
  private void startServices() throws IOException {
    if (!isStopped() && !isAborted()) {
      initializeThreads();
    }
    this.secureBulkLoadManager = new SecureBulkLoadManager(this.conf, asyncClusterConnection);
    this.secureBulkLoadManager.start();

    // Health checker thread.
    if (isHealthCheckerConfigured()) {
      int sleepTime = this.conf.getInt(HConstants.HEALTH_CHORE_WAKE_FREQ,
        HConstants.DEFAULT_THREAD_WAKE_FREQUENCY);
      healthCheckChore = new HealthCheckChore(sleepTime, this, getConfiguration());
    }
    // Executor status collect thread.
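    // The chore periodically snapshots the executor pools' status (queued and running events)
    // into the region server metrics source.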
    if (
      this.conf.getBoolean(HConstants.EXECUTOR_STATUS_COLLECT_ENABLED,
        HConstants.DEFAULT_EXECUTOR_STATUS_COLLECT_ENABLED)
    ) {
      int sleepTime =
        this.conf.getInt(ExecutorStatusChore.WAKE_FREQ, ExecutorStatusChore.DEFAULT_WAKE_FREQ);
      executorStatusChore = new ExecutorStatusChore(sleepTime, this, this.getExecutorService(),
        this.metricsRegionServer.getMetricsSource());
    }

    this.walRoller = new LogRoller(this);
    this.flushThroughputController = FlushThroughputControllerFactory.create(this, conf);
    this.procedureResultReporter = new RemoteProcedureResultReporter(this);

    // Create the CompactedFileDischarger chore executorService. This chore helps to
    // remove the compacted files that will no longer be used in reads.
    // Default is 2 mins. The default value for TTLCleaner is 5 mins so we set this to
    // 2 mins so that compacted files can be archived before the TTLCleaner runs
    int cleanerInterval = conf.getInt("hbase.hfile.compaction.discharger.interval", 2 * 60 * 1000);
    this.compactedFileDischarger = new CompactedHFilesDischarger(cleanerInterval, this, this);
    choreService.scheduleChore(compactedFileDischarger);

    // Start executor services
    final int openRegionThreads = conf.getInt("hbase.regionserver.executor.openregion.threads", 3);
    executorService.startExecutorService(executorService.new ExecutorConfig()
      .setExecutorType(ExecutorType.RS_OPEN_REGION).setCorePoolSize(openRegionThreads));
    final int openMetaThreads = conf.getInt("hbase.regionserver.executor.openmeta.threads", 1);
    executorService.startExecutorService(executorService.new ExecutorConfig()
      .setExecutorType(ExecutorType.RS_OPEN_META).setCorePoolSize(openMetaThreads));
    final int openPriorityRegionThreads =
      conf.getInt("hbase.regionserver.executor.openpriorityregion.threads", 3);
    executorService.startExecutorService(
      executorService.new ExecutorConfig().setExecutorType(ExecutorType.RS_OPEN_PRIORITY_REGION)
        .setCorePoolSize(openPriorityRegionThreads));
    final int closeRegionThreads =
      conf.getInt("hbase.regionserver.executor.closeregion.threads", 3);
    executorService.startExecutorService(executorService.new ExecutorConfig()
      .setExecutorType(ExecutorType.RS_CLOSE_REGION).setCorePoolSize(closeRegionThreads));
    final int closeMetaThreads = conf.getInt("hbase.regionserver.executor.closemeta.threads", 1);
    executorService.startExecutorService(executorService.new ExecutorConfig()
      .setExecutorType(ExecutorType.RS_CLOSE_META).setCorePoolSize(closeMetaThreads));
    if (conf.getBoolean(StoreScanner.STORESCANNER_PARALLEL_SEEK_ENABLE, false)) {
      final int storeScannerParallelSeekThreads =
        conf.getInt("hbase.storescanner.parallel.seek.threads", 10);
      executorService.startExecutorService(
        executorService.new ExecutorConfig().setExecutorType(ExecutorType.RS_PARALLEL_SEEK)
          .setCorePoolSize(storeScannerParallelSeekThreads).setAllowCoreThreadTimeout(true));
    }
    final int logReplayOpsThreads =
      conf.getInt(HBASE_SPLIT_WAL_MAX_SPLITTER, DEFAULT_HBASE_SPLIT_WAL_MAX_SPLITTER);
    executorService.startExecutorService(
      executorService.new ExecutorConfig().setExecutorType(ExecutorType.RS_LOG_REPLAY_OPS)
        .setCorePoolSize(logReplayOpsThreads).setAllowCoreThreadTimeout(true));
    // Start the threads for compacted files discharger
    final int compactionDischargerThreads =
      conf.getInt(CompactionConfiguration.HBASE_HFILE_COMPACTION_DISCHARGER_THREAD_COUNT, 10);
    executorService.startExecutorService(executorService.new ExecutorConfig()
      .setExecutorType(ExecutorType.RS_COMPACTED_FILES_DISCHARGER)
      .setCorePoolSize(compactionDischargerThreads));
    if (ServerRegionReplicaUtil.isRegionReplicaWaitForPrimaryFlushEnabled(conf)) {
      final int regionReplicaFlushThreads =
        conf.getInt("hbase.regionserver.region.replica.flusher.threads",
          conf.getInt("hbase.regionserver.executor.openregion.threads", 3));
      executorService.startExecutorService(executorService.new ExecutorConfig()
        .setExecutorType(ExecutorType.RS_REGION_REPLICA_FLUSH_OPS)
        .setCorePoolSize(regionReplicaFlushThreads));
    }
    final int refreshPeerThreads =
      conf.getInt("hbase.regionserver.executor.refresh.peer.threads", 2);
    executorService.startExecutorService(executorService.new ExecutorConfig()
      .setExecutorType(ExecutorType.RS_REFRESH_PEER).setCorePoolSize(refreshPeerThreads));
    final int replaySyncReplicationWALThreads =
      conf.getInt("hbase.regionserver.executor.replay.sync.replication.wal.threads", 1);
    executorService.startExecutorService(executorService.new ExecutorConfig()
      .setExecutorType(ExecutorType.RS_REPLAY_SYNC_REPLICATION_WAL)
      .setCorePoolSize(replaySyncReplicationWALThreads));
    final int switchRpcThrottleThreads =
      conf.getInt("hbase.regionserver.executor.switch.rpc.throttle.threads", 1);
    executorService.startExecutorService(
      executorService.new ExecutorConfig().setExecutorType(ExecutorType.RS_SWITCH_RPC_THROTTLE)
        .setCorePoolSize(switchRpcThrottleThreads));
    final int claimReplicationQueueThreads =
      conf.getInt("hbase.regionserver.executor.claim.replication.queue.threads", 1);
    executorService.startExecutorService(
      executorService.new ExecutorConfig().setExecutorType(ExecutorType.RS_CLAIM_REPLICATION_QUEUE)
        .setCorePoolSize(claimReplicationQueueThreads));
    final int rsSnapshotOperationThreads =
      conf.getInt("hbase.regionserver.executor.snapshot.operations.threads", 3);
    executorService.startExecutorService(
      executorService.new ExecutorConfig().setExecutorType(ExecutorType.RS_SNAPSHOT_OPERATIONS)
        .setCorePoolSize(rsSnapshotOperationThreads));
    final int rsFlushOperationThreads =
      conf.getInt("hbase.regionserver.executor.flush.operations.threads", 3);
    executorService.startExecutorService(executorService.new ExecutorConfig()
      .setExecutorType(ExecutorType.RS_FLUSH_OPERATIONS).setCorePoolSize(rsFlushOperationThreads));
    final int rsRefreshQuotasThreads =
      conf.getInt("hbase.regionserver.executor.refresh.quotas.threads", 1);
    executorService.startExecutorService(
      executorService.new ExecutorConfig().setExecutorType(ExecutorType.RS_RELOAD_QUOTAS_OPERATIONS)
        .setCorePoolSize(rsRefreshQuotasThreads));
    final int logRollThreads = conf.getInt("hbase.regionserver.executor.log.roll.threads", 1);
    executorService.startExecutorService(executorService.new ExecutorConfig()
      .setExecutorType(ExecutorType.RS_LOG_ROLL).setCorePoolSize(logRollThreads));

    Threads.setDaemonThreadRunning(this.walRoller, getName() + ".logRoller",
      uncaughtExceptionHandler);
    if (this.cacheFlusher != null) {
      this.cacheFlusher.start(uncaughtExceptionHandler);
    }
    Threads.setDaemonThreadRunning(this.procedureResultReporter,
      getName() + ".procedureResultReporter", uncaughtExceptionHandler);
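
    // Schedule the chores built in initializeThreads(); the shared ChoreService runs each one on
    // its configured period. Null checks are needed because some chores are only created when the
    // corresponding feature is enabled.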
    if (this.compactionChecker != null) {
      choreService.scheduleChore(compactionChecker);
    }
    if (this.periodicFlusher != null) {
      choreService.scheduleChore(periodicFlusher);
    }
    if (this.healthCheckChore != null) {
      choreService.scheduleChore(healthCheckChore);
    }
    if (this.executorStatusChore != null) {
      choreService.scheduleChore(executorStatusChore);
    }
    if (this.nonceManagerChore != null) {
      choreService.scheduleChore(nonceManagerChore);
    }
    if (this.storefileRefresher != null) {
      choreService.scheduleChore(storefileRefresher);
    }
    if (this.fsUtilizationChore != null) {
      choreService.scheduleChore(fsUtilizationChore);
    }
    if (this.namedQueueServiceChore != null) {
      choreService.scheduleChore(namedQueueServiceChore);
    }
    if (this.brokenStoreFileCleaner != null) {
      choreService.scheduleChore(brokenStoreFileCleaner);
    }
    if (this.rsMobFileCleanerChore != null) {
      choreService.scheduleChore(rsMobFileCleanerChore);
    }
    if (replicationMarkerChore != null) {
      LOG.info("Starting replication marker chore");
      choreService.scheduleChore(replicationMarkerChore);
    }

    // Leases is not a Thread. Internally it runs a daemon thread. If it gets
    // an unhandled exception, it will just exit.
    Threads.setDaemonThreadRunning(this.leaseManager, getName() + ".leaseChecker",
      uncaughtExceptionHandler);

    // Create the log splitting worker and start it
    // set a smaller retries to fast fail otherwise splitlogworker could be blocked for
    // quite a while inside Connection layer. The worker won't be available for other
    // tasks even after current task is preempted after a split task times out.
    Configuration sinkConf = HBaseConfiguration.create(conf);
    sinkConf.setInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER,
      conf.getInt("hbase.log.replay.retries.number", 8)); // 8 retries take about 23 seconds
    sinkConf.setInt(HConstants.HBASE_RPC_TIMEOUT_KEY,
      conf.getInt("hbase.log.replay.rpc.timeout", 30000)); // default 30 seconds
    sinkConf.setInt(HConstants.HBASE_CLIENT_SERVERSIDE_RETRIES_MULTIPLIER, 1);
    if (
      this.csm != null
        && conf.getBoolean(HBASE_SPLIT_WAL_COORDINATED_BY_ZK, DEFAULT_HBASE_SPLIT_COORDINATED_BY_ZK)
    ) {
      // SplitLogWorker needs csm. If none, don't start this.
      this.splitLogWorker = new SplitLogWorker(sinkConf, this, this, walFactory);
      splitLogWorker.start();
      LOG.debug("SplitLogWorker started");
    }

    // Memstore services.
    startHeapMemoryManager();
    // Call it after starting HeapMemoryManager.
    initializeMemStoreChunkCreator(hMemManager);
  }
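
  // For reference, each executor pool size used in startServices() is read from configuration, so
  // it can be tuned per deployment. An illustrative hbase-site.xml override (the value 6 is only
  // an example; the code above defaults this pool to 3 threads):
  //
  //   <property>
  //     <name>hbase.regionserver.executor.openregion.threads</name>
  //     <value>6</value>
  //   </property>
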
  private void initializeThreads() {
    // Cache flushing thread.
    this.cacheFlusher = new MemStoreFlusher(conf, this);

    // Compaction thread
    this.compactSplitThread = new CompactSplit(this);

    // Prefetch Notifier
    this.prefetchExecutorNotifier = new PrefetchExecutorNotifier(conf);

    // Background thread to check for compactions; needed if region has not gotten updates
    // in a while. It will take care of not checking too frequently on a store-by-store basis.
    this.compactionChecker = new CompactionChecker(this, this.compactionCheckFrequency, this);
    this.periodicFlusher = new PeriodicMemStoreFlusher(this.flushCheckFrequency, this);
    this.leaseManager = new LeaseManager(this.threadWakeFrequency);

    final boolean isSlowLogTableEnabled = conf.getBoolean(HConstants.SLOW_LOG_SYS_TABLE_ENABLED_KEY,
      HConstants.DEFAULT_SLOW_LOG_SYS_TABLE_ENABLED_KEY);
    final boolean walEventTrackerEnabled =
      conf.getBoolean(WAL_EVENT_TRACKER_ENABLED_KEY, WAL_EVENT_TRACKER_ENABLED_DEFAULT);

    if (isSlowLogTableEnabled || walEventTrackerEnabled) {
      // default chore duration: 10 min
      // After <version number>, we will remove the hbase.slowlog.systable.chore.duration conf
      // property
      final int slowLogChoreDuration = conf.getInt(HConstants.SLOW_LOG_SYS_TABLE_CHORE_DURATION_KEY,
        DEFAULT_SLOW_LOG_SYS_TABLE_CHORE_DURATION);

      final int namedQueueChoreDuration =
        conf.getInt(NAMED_QUEUE_CHORE_DURATION_KEY, NAMED_QUEUE_CHORE_DURATION_DEFAULT);
      // Use the min of slowLogChoreDuration and namedQueueChoreDuration
      int choreDuration = Math.min(slowLogChoreDuration, namedQueueChoreDuration);

      namedQueueServiceChore = new NamedQueueServiceChore(this, choreDuration,
        this.namedQueueRecorder, this.getConnection());
    }

    if (this.nonceManager != null) {
      // Create the scheduled chore that cleans up nonces.
      nonceManagerChore = this.nonceManager.createCleanupScheduledChore(this);
    }

    // Setup the Quota Manager
    rsQuotaManager = new RegionServerRpcQuotaManager(this);
    configurationManager.registerObserver(rsQuotaManager);
    rsSpaceQuotaManager = new RegionServerSpaceQuotaManager(this);

    if (QuotaUtil.isQuotaEnabled(conf)) {
      this.fsUtilizationChore = new FileSystemUtilizationChore(this);
    }

    boolean onlyMetaRefresh = false;
    int storefileRefreshPeriod =
      conf.getInt(StorefileRefresherChore.REGIONSERVER_STOREFILE_REFRESH_PERIOD,
        StorefileRefresherChore.DEFAULT_REGIONSERVER_STOREFILE_REFRESH_PERIOD);
    if (storefileRefreshPeriod == 0) {
      storefileRefreshPeriod =
        conf.getInt(StorefileRefresherChore.REGIONSERVER_META_STOREFILE_REFRESH_PERIOD,
          StorefileRefresherChore.DEFAULT_REGIONSERVER_STOREFILE_REFRESH_PERIOD);
      onlyMetaRefresh = true;
    }
    if (storefileRefreshPeriod > 0) {
      this.storefileRefresher =
        new StorefileRefresherChore(storefileRefreshPeriod, onlyMetaRefresh, this, this);
    }

    int brokenStoreFileCleanerPeriod =
      conf.getInt(BrokenStoreFileCleaner.BROKEN_STOREFILE_CLEANER_PERIOD,
        BrokenStoreFileCleaner.DEFAULT_BROKEN_STOREFILE_CLEANER_PERIOD);
    int brokenStoreFileCleanerDelay =
      conf.getInt(BrokenStoreFileCleaner.BROKEN_STOREFILE_CLEANER_DELAY,
        BrokenStoreFileCleaner.DEFAULT_BROKEN_STOREFILE_CLEANER_DELAY);
    double brokenStoreFileCleanerDelayJitter =
      conf.getDouble(BrokenStoreFileCleaner.BROKEN_STOREFILE_CLEANER_DELAY_JITTER,
        BrokenStoreFileCleaner.DEFAULT_BROKEN_STOREFILE_CLEANER_DELAY_JITTER);
    double jitterRate =
      (ThreadLocalRandom.current().nextDouble() - 0.5D) * brokenStoreFileCleanerDelayJitter;
    long jitterValue = Math.round(brokenStoreFileCleanerDelay * jitterRate);
    this.brokenStoreFileCleaner =
      new BrokenStoreFileCleaner((int) (brokenStoreFileCleanerDelay + jitterValue),
        brokenStoreFileCleanerPeriod, this, conf, this);

    this.rsMobFileCleanerChore = new RSMobFileCleanerChore(this);
    registerConfigurationObservers();
    initializeReplicationMarkerChore();
  }

  private void registerConfigurationObservers() {
    // Register replication handlers if possible, as we now support recreating replication peer
    // storage for migrating across different replication peer storages online
    if (replicationSourceHandler instanceof ConfigurationObserver) {
      configurationManager.registerObserver((ConfigurationObserver) replicationSourceHandler);
    }
    if (!sameReplicationSourceAndSink && replicationSinkHandler instanceof ConfigurationObserver) {
      configurationManager.registerObserver((ConfigurationObserver) replicationSinkHandler);
    }
    // Registering the compactSplitThread object with the ConfigurationManager.
    configurationManager.registerObserver(this.compactSplitThread);
    configurationManager.registerObserver(this.cacheFlusher);
    configurationManager.registerObserver(this.rpcServices);
    configurationManager.registerObserver(this.prefetchExecutorNotifier);
    configurationManager.registerObserver(this);
  }

  /*
   * Verify that server is healthy
   */
  private boolean isHealthy() {
    if (!dataFsOk) {
      // File system problem
      return false;
    }
    // Verify that all threads are alive
    boolean healthy = (this.leaseManager == null || this.leaseManager.isAlive())
      && (this.cacheFlusher == null || this.cacheFlusher.isAlive())
      && (this.walRoller == null || this.walRoller.isAlive())
      && (this.compactionChecker == null || this.compactionChecker.isScheduled())
      && (this.periodicFlusher == null || this.periodicFlusher.isScheduled());
    if (!healthy) {
      stop("One or more threads are no longer alive -- stop");
    }
    return healthy;
  }

  @Override
  public List<WAL> getWALs() {
    return walFactory.getWALs();
  }

  @Override
  public WAL getWAL(RegionInfo regionInfo) throws IOException {
    WAL wal = walFactory.getWAL(regionInfo);
    if (this.walRoller != null) {
      this.walRoller.addWAL(wal);
    }
    return wal;
  }

  public LogRoller getWalRoller() {
    return walRoller;
  }

  public WALFactory getWalFactory() {
    return walFactory;
  }

  @Override
  public void stop(final String msg) {
    stop(msg, false, RpcServer.getRequestUser().orElse(null));
  }

  /**
   * Stops the regionserver.
   * @param msg   Status message
   * @param force True if this is a regionserver abort
   * @param user  The user executing the stop request, or null if no user is associated
   */
  public void stop(final String msg, final boolean force, final User user) {
    if (!this.stopped) {
      LOG.info("***** STOPPING region server '{}' *****", this);
      if (this.rsHost != null) {
        // when forced via abort don't allow CPs to override
        try {
          this.rsHost.preStop(msg, user);
        } catch (IOException ioe) {
          if (!force) {
            LOG.warn("The region server did not stop", ioe);
            return;
          }
          LOG.warn("Skipping coprocessor exception on preStop() due to forced shutdown", ioe);
        }
      }
      this.stopped = true;
      LOG.info("STOPPED: " + msg);
      // Wakes run() if it is sleeping
      sleeper.skipSleepCycle();
    }
  }

  public void waitForServerOnline() {
    while (!isStopped() && !isOnline()) {
      synchronized (online) {
        try {
          online.wait(msgInterval);
        } catch (InterruptedException ie) {
          Thread.currentThread().interrupt();
          break;
        }
      }
    }
  }

  @Override
  public void postOpenDeployTasks(final PostOpenDeployContext context) throws IOException {
    HRegion r = context.getRegion();
    long openProcId = context.getOpenProcId();
    long masterSystemTime = context.getMasterSystemTime();
    long initiatingMasterActiveTime = context.getInitiatingMasterActiveTime();
    rpcServices.checkOpen();
    LOG.info("Post open deploy tasks for {}, pid={}, masterSystemTime={}",
      r.getRegionInfo().getRegionNameAsString(), openProcId, masterSystemTime);
    // Do checks to see if we need to compact (references or too many files)
    // Skip compaction check if region is read only
    if (!r.isReadOnly()) {
      for (HStore s : r.stores.values()) {
        if (s.hasReferences() || s.needsCompaction()) {
          this.compactSplitThread.requestSystemCompaction(r, s, "Opening Region");
        }
      }
    }
    long openSeqNum = r.getOpenSeqNum();
    if (openSeqNum == HConstants.NO_SEQNUM) {
      // If we opened a region, we should have read some sequence number from it.
      LOG.error(
        "No sequence number found when opening " + r.getRegionInfo().getRegionNameAsString());
      openSeqNum = 0;
    }

    // Notify master
    if (
      !reportRegionStateTransition(new RegionStateTransitionContext(TransitionCode.OPENED,
        openSeqNum, openProcId, masterSystemTime, r.getRegionInfo(), initiatingMasterActiveTime))
    ) {
      throw new IOException(
        "Failed to report opened region to master: " + r.getRegionInfo().getRegionNameAsString());
    }

    triggerFlushInPrimaryRegion(r);

    LOG.debug("Finished post open deploy task for " + r.getRegionInfo().getRegionNameAsString());
  }

  /**
   * Helper method for use in tests. Skip the region transition report when there's no master
   * around to receive it.
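   * Instead, the new region location is written straight to hbase:meta (except for the meta
   * region itself, whose location lives in the master local store and cannot be updated here).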
   */
  private boolean skipReportingTransition(final RegionStateTransitionContext context) {
    final TransitionCode code = context.getCode();
    final long openSeqNum = context.getOpenSeqNum();
    long masterSystemTime = context.getMasterSystemTime();
    final RegionInfo[] hris = context.getHris();

    if (code == TransitionCode.OPENED) {
      Preconditions.checkArgument(hris != null && hris.length == 1);
      if (hris[0].isMetaRegion()) {
        LOG.warn(
          "meta table location is stored in master local store, so we cannot skip reporting");
        return false;
      } else {
        try {
          MetaTableAccessor.updateRegionLocation(asyncClusterConnection.toConnection(), hris[0],
            serverName, openSeqNum, masterSystemTime);
        } catch (IOException e) {
          LOG.info("Failed to update meta", e);
          return false;
        }
      }
    }
    return true;
  }

  private ReportRegionStateTransitionRequest
    createReportRegionStateTransitionRequest(final RegionStateTransitionContext context) {
    final TransitionCode code = context.getCode();
    final long openSeqNum = context.getOpenSeqNum();
    final RegionInfo[] hris = context.getHris();
    final long[] procIds = context.getProcIds();

    ReportRegionStateTransitionRequest.Builder builder =
      ReportRegionStateTransitionRequest.newBuilder();
    builder.setServer(ProtobufUtil.toServerName(serverName));
    RegionStateTransition.Builder transition = builder.addTransitionBuilder();
    transition.setTransitionCode(code);
    if (code == TransitionCode.OPENED && openSeqNum >= 0) {
      transition.setOpenSeqNum(openSeqNum);
    }
    for (RegionInfo hri : hris) {
      transition.addRegionInfo(ProtobufUtil.toRegionInfo(hri));
    }
    for (long procId : procIds) {
      transition.addProcId(procId);
    }
    transition.setInitiatingMasterActiveTime(context.getInitiatingMasterActiveTime());

    return builder.build();
  }

  @Override
  public boolean reportRegionStateTransition(final RegionStateTransitionContext context) {
    if (TEST_SKIP_REPORTING_TRANSITION) {
      return skipReportingTransition(context);
    }
    final ReportRegionStateTransitionRequest request =
      createReportRegionStateTransitionRequest(context);

    int tries = 0;
    long pauseTime = this.retryPauseTime;
    // Keep looping till we get an error. We want to send reports even though server is going down.
    // Only go down if clusterConnection is null. It is set to null almost as the last thing the
    // HRegionServer does as it goes down.
    while (this.asyncClusterConnection != null && !this.asyncClusterConnection.isClosed()) {
      RegionServerStatusService.BlockingInterface rss = rssStub;
      try {
        if (rss == null) {
          createRegionServerStatusStub();
          continue;
        }
        ReportRegionStateTransitionResponse response =
          rss.reportRegionStateTransition(null, request);
        if (response.hasErrorMessage()) {
          LOG.info("TRANSITION FAILED " + request + ": " + response.getErrorMessage());
          break;
        }
        // Log if we had to retry, else don't log unless TRACE. We want to
        // know if we were successful after an attempt showed in logs as failed.
        if (tries > 0 || LOG.isTraceEnabled()) {
          LOG.info("TRANSITION REPORTED " + request);
        }
        // NOTE: Return mid-method!!!
        return true;
      } catch (ServiceException se) {
        IOException ioe = ProtobufUtil.getRemoteException(se);
        boolean pause = ioe instanceof ServerNotRunningYetException
          || ioe instanceof PleaseHoldException || ioe instanceof CallQueueTooBigException;
        if (pause) {
          // Do backoff else we flood the Master with requests.
          pauseTime = ConnectionUtils.getPauseTime(this.retryPauseTime, tries);
        } else {
          pauseTime = this.retryPauseTime; // Reset.
        }
        LOG.info("Failed report transition " + TextFormat.shortDebugString(request) + "; retry (#"
          + tries + ")"
          + (pause
            ? " after " + pauseTime + "ms delay (Master is coming online...)."
            : " immediately."),
          ioe);
        if (pause) {
          Threads.sleep(pauseTime);
        }
        tries++;
        if (rssStub == rss) {
          rssStub = null;
        }
      }
    }
    return false;
  }

  /**
   * Trigger a flush in the primary region replica if this region is a secondary replica. Does not
   * block this thread. See RegionReplicaFlushHandler for details.
   */
  private void triggerFlushInPrimaryRegion(final HRegion region) {
    if (ServerRegionReplicaUtil.isDefaultReplica(region.getRegionInfo())) {
      return;
    }
    TableName tn = region.getTableDescriptor().getTableName();
    if (
      !ServerRegionReplicaUtil.isRegionReplicaReplicationEnabled(region.conf, tn)
        || !ServerRegionReplicaUtil.isRegionReplicaWaitForPrimaryFlushEnabled(region.conf) ||
        // If memstore replication is not set up, we do not have to wait for a flush event from
        // the primary before starting to serve reads, because gaps from replication are not
        // applicable; this logic is from
        // TableDescriptorBuilder.ModifyableTableDescriptor.setRegionMemStoreReplication by
        // HBASE-13063
        !region.getTableDescriptor().hasRegionMemStoreReplication()
    ) {
      region.setReadsEnabled(true);
      return;
    }

    region.setReadsEnabled(false); // disable reads before marking the region as opened.
    // RegionReplicaFlushHandler might reset this.

    // Submit it to be handled by one of the handlers so that we do not block OpenRegionHandler
    if (this.executorService != null) {
      this.executorService.submit(new RegionReplicaFlushHandler(this, region));
    } else {
      LOG.info("Executor is null; not running flush of primary region replica for {}",
        region.getRegionInfo());
    }
  }

  @InterfaceAudience.Private
  public RSRpcServices getRSRpcServices() {
    return rpcServices;
  }

  /**
   * Cause the server to exit without closing the regions it is serving, the log it is using and
   * without notifying the master. Used in unit testing and on catastrophic events such as HDFS
   * being yanked out from under hbase or an OOME.
   * @param reason the reason we are aborting
   * @param cause  the exception that caused the abort, or null
   */
  @Override
  public void abort(String reason, Throwable cause) {
    if (!setAbortRequested()) {
      // Abort already in progress, ignore the new request.
      LOG.debug("Abort already in progress. Ignoring the current request with reason: {}", reason);
      return;
    }
    String msg = "***** ABORTING region server " + this + ": " + reason + " *****";
    if (cause != null) {
      LOG.error(HBaseMarkers.FATAL, msg, cause);
    } else {
      LOG.error(HBaseMarkers.FATAL, msg);
    }
    // HBASE-4014: show list of coprocessors that were loaded to help debug
    // regionserver crashes. Note that we're implicitly using
    // java.util.HashSet's toString() method to print the coprocessor names.
    LOG.error(HBaseMarkers.FATAL,
      "RegionServer abort: loaded coprocessors are: " + CoprocessorHost.getLoadedCoprocessors());
    // Try and dump metrics if abort -- might give clue as to how fatal came about....
    try {
      LOG.info("Dump of metrics as JSON on abort: " + DumpRegionServerMetrics.dumpMetrics());
    } catch (MalformedObjectNameException | IOException e) {
      LOG.warn("Failed dumping metrics", e);
    }

    // Do our best to report our abort to the master, but this may not work
    try {
      if (cause != null) {
        msg += "\nCause:\n" + Throwables.getStackTraceAsString(cause);
      }
      // Report to the master but only if we have already registered with the master.
      RegionServerStatusService.BlockingInterface rss = rssStub;
      if (rss != null && this.serverName != null) {
        ReportRSFatalErrorRequest.Builder builder = ReportRSFatalErrorRequest.newBuilder();
        builder.setServer(ProtobufUtil.toServerName(this.serverName));
        builder.setErrorMessage(msg);
        rss.reportRSFatalError(null, builder.build());
      }
    } catch (Throwable t) {
      LOG.warn("Unable to report fatal error to master", t);
    }

    scheduleAbortTimer();
    // shutdown should be run as the internal user
    stop(reason, true, null);
  }

  /*
   * Simulate a kill -9 of this server. Exits w/o closing regions or cleaning up logs, but it does
   * close the socket in case we want to bring up a server on the old hostname+port immediately.
   */
  @InterfaceAudience.Private
  protected void kill() {
    this.killed = true;
    abort("Simulated kill");
  }

  // Limits the time spent in the shutdown process.
  private void scheduleAbortTimer() {
    if (this.abortMonitor == null) {
      this.abortMonitor = new Timer("Abort regionserver monitor", true);
      TimerTask abortTimeoutTask = null;
      try {
        Constructor<? extends TimerTask> timerTaskCtor =
          Class.forName(conf.get(ABORT_TIMEOUT_TASK, SystemExitWhenAbortTimeout.class.getName()))
            .asSubclass(TimerTask.class).getDeclaredConstructor();
        timerTaskCtor.setAccessible(true);
        abortTimeoutTask = timerTaskCtor.newInstance();
      } catch (Exception e) {
        LOG.warn("Initialize abort timeout task failed", e);
      }
      if (abortTimeoutTask != null) {
        abortMonitor.schedule(abortTimeoutTask, conf.getLong(ABORT_TIMEOUT, DEFAULT_ABORT_TIMEOUT));
      }
    }
  }

  /**
   * Wait on all threads to finish. Presumption is that all closes and stops have already been
   * called.
   */
  protected void stopServiceThreads() {
    // clean up the scheduled chores
    stopChoreService();
    if (bootstrapNodeManager != null) {
      bootstrapNodeManager.stop();
    }
    if (this.cacheFlusher != null) {
      this.cacheFlusher.shutdown();
    }
    if (this.walRoller != null) {
      this.walRoller.close();
    }
    if (this.compactSplitThread != null) {
      this.compactSplitThread.join();
    }
    stopExecutorService();
    if (sameReplicationSourceAndSink && this.replicationSourceHandler != null) {
      this.replicationSourceHandler.stopReplicationService();
    } else {
      if (this.replicationSourceHandler != null) {
        this.replicationSourceHandler.stopReplicationService();
      }
      if (this.replicationSinkHandler != null) {
        this.replicationSinkHandler.stopReplicationService();
      }
    }
  }

  /** Returns the object that implements the replication source executorService. */
  @Override
  public ReplicationSourceService getReplicationSourceService() {
    return replicationSourceHandler;
  }

  /** Returns the object that implements the replication sink executorService. */
  public ReplicationSinkService getReplicationSinkService() {
    return replicationSinkHandler;
  }

  /**
   * Get the current master from ZooKeeper and open the RPC connection to it. To get a fresh
   * connection, the current rssStub must be null. Method will block until a master is available.
   * You can break from this block by requesting the server stop.
   * @return master + port, or null if server has been stopped
   */
  private synchronized ServerName createRegionServerStatusStub() {
    // Create RS stub without refreshing the master node from ZK, use cached data
    return createRegionServerStatusStub(false);
  }

  /**
   * Get the current master from ZooKeeper and open the RPC connection to it. To get a fresh
   * connection, the current rssStub must be null. Method will block until a master is available.
   * You can break from this block by requesting the server stop.
   * @param refresh If true then master address will be read from ZK, otherwise use cached data
   * @return master + port, or null if server has been stopped
   */
  @InterfaceAudience.Private
  protected synchronized ServerName createRegionServerStatusStub(boolean refresh) {
    if (rssStub != null) {
      return masterAddressTracker.getMasterAddress();
    }
    ServerName sn = null;
    long previousLogTime = 0;
    RegionServerStatusService.BlockingInterface intRssStub = null;
    LockService.BlockingInterface intLockStub = null;
    boolean interrupted = false;
    try {
      while (keepLooping()) {
        sn = this.masterAddressTracker.getMasterAddress(refresh);
        if (sn == null) {
          if (!keepLooping()) {
            // give up with no connection.
            LOG.debug("No master found and cluster is stopped; bailing out");
            return null;
          }
          if (EnvironmentEdgeManager.currentTime() > (previousLogTime + 1000)) {
            LOG.debug("No master found; retry");
            previousLogTime = EnvironmentEdgeManager.currentTime();
          }
          refresh = true; // let's try to pull it from ZK directly
          if (sleepInterrupted(200)) {
            interrupted = true;
          }
          continue;
        }
        try {
          BlockingRpcChannel channel = this.rpcClient.createBlockingRpcChannel(sn,
            userProvider.getCurrent(), shortOperationTimeout);
          intRssStub = RegionServerStatusService.newBlockingStub(channel);
          intLockStub = LockService.newBlockingStub(channel);
          break;
        } catch (IOException e) {
          if (EnvironmentEdgeManager.currentTime() > (previousLogTime + 1000)) {
            e = e instanceof RemoteException ? ((RemoteException) e).unwrapRemoteException() : e;
            if (e instanceof ServerNotRunningYetException) {
              LOG.info("Master isn't available yet, retrying");
            } else {
              LOG.warn("Unable to connect to master. Retrying. Error was:", e);
            }
            previousLogTime = EnvironmentEdgeManager.currentTime();
          }
          if (sleepInterrupted(200)) {
            interrupted = true;
          }
        }
      }
    } finally {
      if (interrupted) {
        Thread.currentThread().interrupt();
      }
    }
    this.rssStub = intRssStub;
    this.lockStub = intLockStub;
    return sn;
  }

  /**
   * @return True if we should break loop because cluster is going down or this server has been
   *         stopped or hdfs has gone bad.
   */
  private boolean keepLooping() {
    return !this.stopped && isClusterUp();
  }

  /*
   * Let the master know we're here. Run initialization using parameters passed to us by the
   * master.
   * @return A Map of key/value configurations we got from the Master, else null if we failed to
   * register.
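   * Registration blocks inside createRegionServerStatusStub(true) until a master is available or
   * this server is asked to stop.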
   */
  private RegionServerStartupResponse reportForDuty() throws IOException {
    if (this.masterless) {
      return RegionServerStartupResponse.getDefaultInstance();
    }
    ServerName masterServerName = createRegionServerStatusStub(true);
    RegionServerStatusService.BlockingInterface rss = rssStub;
    if (masterServerName == null || rss == null) {
      return null;
    }
    RegionServerStartupResponse result = null;
    try {
      rpcServices.requestCount.reset();
      rpcServices.rpcGetRequestCount.reset();
      rpcServices.rpcScanRequestCount.reset();
      rpcServices.rpcFullScanRequestCount.reset();
      rpcServices.rpcMultiRequestCount.reset();
      rpcServices.rpcMutateRequestCount.reset();
      LOG.info("reportForDuty to master=" + masterServerName + " with port="
        + rpcServices.getSocketAddress().getPort() + ", startcode=" + this.startcode);
      long now = EnvironmentEdgeManager.currentTime();
      int port = rpcServices.getSocketAddress().getPort();
      RegionServerStartupRequest.Builder request = RegionServerStartupRequest.newBuilder();
      if (!StringUtils.isBlank(useThisHostnameInstead)) {
        request.setUseThisHostnameInstead(useThisHostnameInstead);
      }
      request.setPort(port);
      request.setServerStartCode(this.startcode);
      request.setServerCurrentTime(now);
      result = rss.regionServerStartup(null, request.build());
    } catch (ServiceException se) {
      IOException ioe = ProtobufUtil.getRemoteException(se);
      if (ioe instanceof ClockOutOfSyncException) {
        LOG.error(HBaseMarkers.FATAL, "Master rejected startup because clock is out of sync", ioe);
        // Re-throwing the IOE will cause the RS to abort
        throw ioe;
      } else if (ioe instanceof DecommissionedHostRejectedException) {
        LOG.error(HBaseMarkers.FATAL,
          "Master rejected startup because the host is considered decommissioned", ioe);
        // Re-throwing the IOE will cause the RS to abort
        throw ioe;
      } else if (ioe instanceof ServerNotRunningYetException) {
        LOG.debug("Master is not running yet");
      } else {
        LOG.warn("Error telling master we are up", se);
      }
      rssStub = null;
    }
    return result;
  }

  @Override
  public RegionStoreSequenceIds getLastSequenceId(byte[] encodedRegionName) {
    try {
      GetLastFlushedSequenceIdRequest req =
        RequestConverter.buildGetLastFlushedSequenceIdRequest(encodedRegionName);
      RegionServerStatusService.BlockingInterface rss = rssStub;
      if (rss == null) { // Try to connect one more time
        createRegionServerStatusStub();
        rss = rssStub;
        if (rss == null) {
          // Still no luck, we tried
          LOG.warn("Unable to connect to the master to check the last flushed sequence id");
          return RegionStoreSequenceIds.newBuilder().setLastFlushedSequenceId(HConstants.NO_SEQNUM)
            .build();
        }
      }
      GetLastFlushedSequenceIdResponse resp = rss.getLastFlushedSequenceId(null, req);
      return RegionStoreSequenceIds.newBuilder()
        .setLastFlushedSequenceId(resp.getLastFlushedSequenceId())
        .addAllStoreSequenceId(resp.getStoreLastFlushedSequenceIdList()).build();
    } catch (ServiceException e) {
      LOG.warn("Unable to connect to the master to check the last flushed sequence id", e);
      return RegionStoreSequenceIds.newBuilder().setLastFlushedSequenceId(HConstants.NO_SEQNUM)
        .build();
    }
  }

  /**
   * Close meta region if we carry it
   * @param abort Whether we're running an abort.
   */
  private void closeMetaTableRegions(final boolean abort) {
    HRegion meta = null;
    this.onlineRegionsLock.writeLock().lock();
    try {
      for (Map.Entry<String, HRegion> e : onlineRegions.entrySet()) {
        RegionInfo hri = e.getValue().getRegionInfo();
        if (hri.isMetaRegion()) {
          meta = e.getValue();
        }
        if (meta != null) {
          break;
        }
      }
    } finally {
      this.onlineRegionsLock.writeLock().unlock();
    }
    if (meta != null) {
      closeRegionIgnoreErrors(meta.getRegionInfo(), abort);
    }
  }

  /**
   * Schedule closes on all user regions. Should be safe calling multiple times because it won't
   * close regions that are already closed or that are closing.
   * @param abort Whether we're running an abort.
   */
  private void closeUserRegions(final boolean abort) {
    this.onlineRegionsLock.writeLock().lock();
    try {
      for (Map.Entry<String, HRegion> e : this.onlineRegions.entrySet()) {
        HRegion r = e.getValue();
        if (!r.getRegionInfo().isMetaRegion() && r.isAvailable()) {
          // Don't update zk with this close transition; pass false.
          closeRegionIgnoreErrors(r.getRegionInfo(), abort);
        }
      }
    } finally {
      this.onlineRegionsLock.writeLock().unlock();
    }
  }

  protected Map<String, HRegion> getOnlineRegions() {
    return this.onlineRegions;
  }

  public int getNumberOfOnlineRegions() {
    return this.onlineRegions.size();
  }

  /**
   * For tests, web ui and metrics. This method will only work if HRegionServer is in the same JVM
   * as the client; HRegion cannot be serialized to cross an rpc.
   */
  public Collection<HRegion> getOnlineRegionsLocalContext() {
    Collection<HRegion> regions = this.onlineRegions.values();
    return Collections.unmodifiableCollection(regions);
  }

  @Override
  public void addRegion(HRegion region) {
    this.onlineRegions.put(region.getRegionInfo().getEncodedName(), region);
    configurationManager.registerObserver(region);
  }

  private void addRegion(SortedMap<Long, Collection<HRegion>> sortedRegions, HRegion region,
    long size) {
    if (!sortedRegions.containsKey(size)) {
      sortedRegions.put(size, new ArrayList<>());
    }
    sortedRegions.get(size).add(region);
  }

  /**
   * @return A new Map of online regions sorted by region memstore off-heap size with the first
   *         entry being the biggest.
   */
  SortedMap<Long, Collection<HRegion>> getCopyOfOnlineRegionsSortedByOffHeapSize() {
    // we'll sort the regions in reverse
    SortedMap<Long, Collection<HRegion>> sortedRegions = new TreeMap<>(Comparator.reverseOrder());
    // Copy over all regions. Regions are sorted by size with biggest first.
    for (HRegion region : this.onlineRegions.values()) {
      addRegion(sortedRegions, region, region.getMemStoreOffHeapSize());
    }
    return sortedRegions;
  }

  /**
   * @return A new Map of online regions sorted by region memstore heap size with the first entry
   *         being the biggest.
   */
  SortedMap<Long, Collection<HRegion>> getCopyOfOnlineRegionsSortedByOnHeapSize() {
    // we'll sort the regions in reverse
    SortedMap<Long, Collection<HRegion>> sortedRegions = new TreeMap<>(Comparator.reverseOrder());
    // Copy over all regions. Regions are sorted by size with biggest first.
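    // (for example, regions with memstore heap sizes {64m, 128m, 64m} come back as
    // {128m -> [r2], 64m -> [r1, r3]}: keys descend, and ties share one bucket)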
    for (HRegion region : this.onlineRegions.values()) {
      addRegion(sortedRegions, region, region.getMemStoreHeapSize());
    }
    return sortedRegions;
  }

  /** Returns reference to FlushRequester */
  @Override
  public FlushRequester getFlushRequester() {
    return this.cacheFlusher;
  }

  @Override
  public CompactionRequester getCompactionRequestor() {
    return this.compactSplitThread;
  }

  @Override
  public LeaseManager getLeaseManager() {
    return leaseManager;
  }

  /** Returns {@code true} when the data file system is available, {@code false} otherwise. */
  boolean isDataFileSystemOk() {
    return this.dataFsOk;
  }

  public RegionServerCoprocessorHost getRegionServerCoprocessorHost() {
    return this.rsHost;
  }

  @Override
  public ConcurrentMap<byte[], Boolean> getRegionsInTransitionInRS() {
    return this.regionsInTransitionInRS;
  }

  @Override
  public RegionServerRpcQuotaManager getRegionServerRpcQuotaManager() {
    return rsQuotaManager;
  }

  //
  // Main program and support routines
  //
  /**
   * Load the replication service objects, if any.
   */
  private static void createNewReplicationInstance(Configuration conf, HRegionServer server,
    FileSystem walFs, Path walDir, Path oldWALDir, WALFactory walFactory) throws IOException {
    // read in the name of the source replication class from the config file.
    String sourceClassname = conf.get(HConstants.REPLICATION_SOURCE_SERVICE_CLASSNAME,
      HConstants.REPLICATION_SERVICE_CLASSNAME_DEFAULT);

    // read in the name of the sink replication class from the config file.
    String sinkClassname = conf.get(HConstants.REPLICATION_SINK_SERVICE_CLASSNAME,
      HConstants.REPLICATION_SINK_SERVICE_CLASSNAME_DEFAULT);

    // If both the sink and the source class names are the same, then instantiate
    // only one object.
    if (sourceClassname.equals(sinkClassname)) {
      server.replicationSourceHandler = newReplicationInstance(sourceClassname,
        ReplicationSourceService.class, conf, server, walFs, walDir, oldWALDir, walFactory);
      server.replicationSinkHandler = (ReplicationSinkService) server.replicationSourceHandler;
      server.sameReplicationSourceAndSink = true;
    } else {
      server.replicationSourceHandler = newReplicationInstance(sourceClassname,
        ReplicationSourceService.class, conf, server, walFs, walDir, oldWALDir, walFactory);
      server.replicationSinkHandler = newReplicationInstance(sinkClassname,
        ReplicationSinkService.class, conf, server, walFs, walDir, oldWALDir, walFactory);
      server.sameReplicationSourceAndSink = false;
    }
  }

  private static <T extends ReplicationService> T newReplicationInstance(String classname,
    Class<T> xface, Configuration conf, HRegionServer server, FileSystem walFs, Path logDir,
    Path oldLogDir, WALFactory walFactory) throws IOException {
    final Class<? extends T> clazz;
    try {
      ClassLoader classLoader = Thread.currentThread().getContextClassLoader();
      clazz = Class.forName(classname, true, classLoader).asSubclass(xface);
    } catch (java.lang.ClassNotFoundException nfe) {
      // Keep the original failure as the cause so the misconfigured classname is debuggable.
      throw new IOException("Could not find class for " + classname, nfe);
    }
    T service = ReflectionUtils.newInstance(clazz, conf);
    service.initialize(server, walFs, logDir, oldLogDir, walFactory);
    return service;
  }

  public Map<String, ReplicationStatus> getWalGroupsReplicationStatus() {
    Map<String, ReplicationStatus> walGroupsReplicationStatus = new TreeMap<>();
    if (!this.isOnline()) {
      return walGroupsReplicationStatus;
    }
    List<ReplicationSourceInterface> allSources = new ArrayList<>();
    allSources.addAll(replicationSourceHandler.getReplicationManager().getSources());
    allSources.addAll(replicationSourceHandler.getReplicationManager().getOldSources());
    for (ReplicationSourceInterface source : allSources) {
      walGroupsReplicationStatus.putAll(source.getWalGroupStatus());
    }
    return walGroupsReplicationStatus;
  }

  /**
   * Utility for constructing an instance of the passed HRegionServer class.
   */
  static HRegionServer constructRegionServer(final Class<? extends HRegionServer> regionServerClass,
    final Configuration conf) {
    try {
      Constructor<? extends HRegionServer> c =
        regionServerClass.getConstructor(Configuration.class);
      return c.newInstance(conf);
    } catch (Exception e) {
      throw new RuntimeException(
        "Failed construction of RegionServer: " + regionServerClass.toString(), e);
    }
  }

  /**
   * @see org.apache.hadoop.hbase.regionserver.HRegionServerCommandLine
   */
  public static void main(String[] args) {
    LOG.info("STARTING service " + HRegionServer.class.getSimpleName());
    VersionInfo.logVersion();
    Configuration conf = HBaseConfiguration.create();
    @SuppressWarnings("unchecked")
    Class<? extends HRegionServer> regionServerClass = (Class<? extends HRegionServer>) conf
      .getClass(HConstants.REGION_SERVER_IMPL, HRegionServer.class);

    new HRegionServerCommandLine(regionServerClass).doMain(args);
  }

  /**
   * Gets the online regions of the specified table. This method looks at the in-memory
   * onlineRegions. It does not go to <code>hbase:meta</code>. Only returns <em>online</em>
   * regions. If a region on this table has been closed during a disable, etc., it will not be
   * included in the returned list. So, the returned list is not necessarily ALL the regions in
   * this table; it is all the ONLINE regions in the table.
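   * <p>
   * A minimal usage sketch ({@code regionServer} being a caller-held reference in the same JVM,
   * e.g. in a test):
   *
   * <pre>{@code
   * List<HRegion> online = regionServer.getRegions(TableName.valueOf("t1"));
   * }</pre>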
   * @param tableName table to limit the scope of the query
   * @return Online regions from <code>tableName</code>
   */
  @Override
  public List<HRegion> getRegions(TableName tableName) {
    List<HRegion> tableRegions = new ArrayList<>();
    synchronized (this.onlineRegions) {
      for (HRegion region : this.onlineRegions.values()) {
        RegionInfo regionInfo = region.getRegionInfo();
        if (regionInfo.getTable().equals(tableName)) {
          tableRegions.add(region);
        }
      }
    }
    return tableRegions;
  }

  @Override
  public List<HRegion> getRegions() {
    List<HRegion> allRegions;
    synchronized (this.onlineRegions) {
      // Return a copy of the onlineRegions values
      allRegions = new ArrayList<>(onlineRegions.values());
    }
    return allRegions;
  }

  /**
   * Gets the online tables in this RS. This method looks at the in-memory onlineRegions.
   * @return all the online tables in this RS
   */
  public Set<TableName> getOnlineTables() {
    Set<TableName> tables = new HashSet<>();
    synchronized (this.onlineRegions) {
      for (Region region : this.onlineRegions.values()) {
        tables.add(region.getTableDescriptor().getTableName());
      }
    }
    return tables;
  }

  public String[] getRegionServerCoprocessors() {
    TreeSet<String> coprocessors = new TreeSet<>();
    try {
      coprocessors.addAll(getWAL(null).getCoprocessorHost().getCoprocessors());
    } catch (IOException exception) {
      LOG.warn("Exception attempting to fetch wal coprocessor information for the common wal; "
        + "skipping.");
      LOG.debug("Exception details for failure to fetch wal coprocessor information.", exception);
    }
    Collection<HRegion> regions = getOnlineRegionsLocalContext();
    for (HRegion region : regions) {
      coprocessors.addAll(region.getCoprocessorHost().getCoprocessors());
      try {
        coprocessors.addAll(getWAL(region.getRegionInfo()).getCoprocessorHost().getCoprocessors());
      } catch (IOException exception) {
        LOG.warn("Exception attempting to fetch wal coprocessor information for region " + region
          + "; skipping.");
        LOG.debug("Exception details for failure to fetch wal coprocessor information.",
          exception);
      }
    }
    coprocessors.addAll(rsHost.getCoprocessors());
    return coprocessors.toArray(new String[0]);
  }

  /**
   * Try to close the region; logs a warning on failure but continues.
   * @param region Region to close
   */
  private void closeRegionIgnoreErrors(RegionInfo region, final boolean abort) {
    try {
      if (!closeRegion(region.getEncodedName(), abort, null)) {
        LOG.warn(
          "Failed to close " + region.getRegionNameAsString() + " - ignoring and continuing");
      }
    } catch (IOException e) {
      LOG.warn("Failed to close " + region.getRegionNameAsString() + " - ignoring and continuing",
        e);
    }
  }

  /**
   * Asynchronously close a region; this can be called from the master or internally by the
   * regionserver when stopping. If called from the master, the region will update the status.
   * <p>
   * If an opening was in progress, this method will cancel it, but will not start a new close.
   * The coprocessors are not called in this case. A NotServingRegionException is thrown.
   * </p>
   * <p>
   * If a close was in progress, this new request will be ignored and the method returns
   * {@code true} without scheduling another close.
   * </p>
   * @param encodedName Region to close
   * @param abort True if we are aborting
   * @param destination Where the Region is being moved to; may be null if unknown.
   * @return True if closed a region.
   * @throws NotServingRegionException if the region is not online
   */
  protected boolean closeRegion(String encodedName, final boolean abort,
    final ServerName destination) throws NotServingRegionException {
    // Check for permissions to close.
    HRegion actualRegion = this.getRegion(encodedName);
    // Can be null if we're calling close on a region that's not online
    if ((actualRegion != null) && (actualRegion.getCoprocessorHost() != null)) {
      try {
        actualRegion.getCoprocessorHost().preClose(false);
      } catch (IOException exp) {
        LOG.warn("Unable to close region: the coprocessor threw an error", exp);
        return false;
      }
    }

    // previous can come back 'null' if not in map.
    final Boolean previous =
      this.regionsInTransitionInRS.putIfAbsent(Bytes.toBytes(encodedName), Boolean.FALSE);

    if (Boolean.TRUE.equals(previous)) {
      LOG.info("Received CLOSE for the region: " + encodedName + ", which we are already "
        + "trying to OPEN. Cancelling OPENING.");
      if (!regionsInTransitionInRS.replace(Bytes.toBytes(encodedName), previous, Boolean.FALSE)) {
        // The replace failed. That should be an exceptional case, but theoretically it can happen.
        // We're going to try to do a standard close then.
        LOG.warn("The opening for region " + encodedName + " was done before we could cancel it."
          + " Doing a standard close now");
        return closeRegion(encodedName, abort, destination);
      }
      // Let's get the region from the online region list again
      actualRegion = this.getRegion(encodedName);
      if (actualRegion == null) { // If already online, we still need to close it.
        LOG.info("The opening previously in progress has been cancelled by a CLOSE request.");
        // The master deletes the znode when it receives this exception.
        throw new NotServingRegionException(
          "The region " + encodedName + " was opening but not yet served. Opening is cancelled.");
      }
    } else if (previous == null) {
      LOG.info("Received CLOSE for {}", encodedName);
    } else if (Boolean.FALSE.equals(previous)) {
      LOG.info("Received CLOSE for the region: " + encodedName
        + ", which we are already trying to CLOSE, but not completed yet");
      return true;
    }

    if (actualRegion == null) {
      LOG.debug("Received CLOSE for a region which is not online, and we're not opening.");
      this.regionsInTransitionInRS.remove(Bytes.toBytes(encodedName));
      // The master deletes the znode when it receives this exception.
      throw new NotServingRegionException(
        "The region " + encodedName + " is not online, and is not opening.");
    }

    CloseRegionHandler crh;
    final RegionInfo hri = actualRegion.getRegionInfo();
    if (hri.isMetaRegion()) {
      crh = new CloseMetaHandler(this, this, hri, abort);
    } else {
      crh = new CloseRegionHandler(this, this, hri, abort, destination);
    }
    this.executorService.submit(crh);
    return true;
  }

  /**
   * @return HRegion for the passed binary <code>regionName</code> or null if the named region is
   *         not a member of the online regions.
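   *         <p>
   *         A lookup sketch ({@code regionServer} and {@code regionInfo} being caller-held
   *         references):
   *
   *         <pre>{@code
   *         HRegion r = regionServer.getOnlineRegion(regionInfo.getRegionName());
   *         }</pre>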
   */
  public HRegion getOnlineRegion(final byte[] regionName) {
    String encodedRegionName = RegionInfo.encodeRegionName(regionName);
    return this.onlineRegions.get(encodedRegionName);
  }

  @Override
  public HRegion getRegion(final String encodedRegionName) {
    return this.onlineRegions.get(encodedRegionName);
  }

  @Override
  public boolean removeRegion(final HRegion r, ServerName destination) {
    HRegion toReturn = this.onlineRegions.remove(r.getRegionInfo().getEncodedName());
    if (DataTieringManager.getInstance() != null) {
      DataTieringManager.getInstance().getRegionColdDataSize()
        .remove(r.getRegionInfo().getEncodedName());
    }
    metricsRegionServerImpl.requestsCountCache.remove(r.getRegionInfo().getEncodedName());
    if (destination != null) {
      long closeSeqNum = r.getMaxFlushedSeqId();
      if (closeSeqNum == HConstants.NO_SEQNUM) {
        // No edits in WAL for this region; get the sequence number when the region was opened.
        closeSeqNum = r.getOpenSeqNum();
        if (closeSeqNum == HConstants.NO_SEQNUM) {
          closeSeqNum = 0;
        }
      }
      boolean selfMove = ServerName.isSameAddress(destination, this.getServerName());
      addToMovedRegions(r.getRegionInfo().getEncodedName(), destination, closeSeqNum, selfMove);
      if (selfMove) {
        this.regionServerAccounting.getRetainedRegionRWRequestsCnt().put(
          r.getRegionInfo().getEncodedName(),
          new Pair<>(r.getReadRequestsCount(), r.getWriteRequestsCount()));
      }
    }
    this.regionFavoredNodesMap.remove(r.getRegionInfo().getEncodedName());
    configurationManager.deregisterObserver(r);
    return toReturn != null;
  }

  /**
   * Protected utility method for safely obtaining an HRegion handle.
   * @param regionName Name of online {@link HRegion} to return
   * @return {@link HRegion} for <code>regionName</code>
   */
  protected HRegion getRegion(final byte[] regionName) throws NotServingRegionException {
    String encodedRegionName = RegionInfo.encodeRegionName(regionName);
    return getRegionByEncodedName(regionName, encodedRegionName);
  }

  public HRegion getRegionByEncodedName(String encodedRegionName)
    throws NotServingRegionException {
    return getRegionByEncodedName(null, encodedRegionName);
  }

  private HRegion getRegionByEncodedName(byte[] regionName, String encodedRegionName)
    throws NotServingRegionException {
    HRegion region = this.onlineRegions.get(encodedRegionName);
    if (region == null) {
      MovedRegionInfo moveInfo = getMovedRegion(encodedRegionName);
      if (moveInfo != null) {
        throw new RegionMovedException(moveInfo.getServerName(), moveInfo.getSeqNum());
      }
      Boolean isOpening = this.regionsInTransitionInRS.get(Bytes.toBytes(encodedRegionName));
      String regionNameStr =
        regionName == null ? encodedRegionName : Bytes.toStringBinary(regionName);
      if (isOpening != null && isOpening) {
        throw new RegionOpeningException(
          "Region " + regionNameStr + " is opening on " + this.serverName);
      }
      throw new NotServingRegionException(
        regionNameStr + " is not online on " + this.serverName);
    }
    return region;
  }

  /**
   * Cleanup after a Throwable caught while invoking a method. Converts <code>t</code> to an IOE
   * if it isn't one already.
   * @param t   Throwable
   * @param msg Message to log in error. Can be null.
   * @return Throwable converted to an IOE; methods can only let out IOEs.
   */
  private Throwable cleanup(final Throwable t, final String msg) {
    // Don't log as error if NSRE; NSRE is 'normal' operation.
    if (t instanceof NotServingRegionException) {
      LOG.debug("NotServingRegionException; " + t.getMessage());
      return t;
    }
    Throwable e = t instanceof RemoteException ? ((RemoteException) t).unwrapRemoteException() : t;
    if (msg == null) {
      LOG.error("", e);
    } else {
      LOG.error(msg, e);
    }
    if (!rpcServices.checkOOME(t)) {
      checkFileSystem();
    }
    return t;
  }

  /**
   * @param msg Message to put in the new IOE if the passed <code>t</code> is not an IOE
   * @return <code>t</code> as an IOE, wrapped if it was not an IOE already.
   */
  private IOException convertThrowableToIOE(final Throwable t, final String msg) {
    return (t instanceof IOException ? (IOException) t
      : msg == null || msg.length() == 0 ? new IOException(t)
      : new IOException(msg, t));
  }

  /**
   * Checks to see if the file system is still accessible. If not, sets abortRequested and
   * stopRequested.
   * @return false if file system is not available
   */
  boolean checkFileSystem() {
    if (this.dataFsOk && this.dataFs != null) {
      try {
        FSUtils.checkFileSystemAvailable(this.dataFs);
      } catch (IOException e) {
        abort("File System not available", e);
        this.dataFsOk = false;
      }
    }
    return this.dataFsOk;
  }

  @Override
  public void updateRegionFavoredNodesMapping(String encodedRegionName,
    List<org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ServerName> favoredNodes) {
    Address[] addr = new Address[favoredNodes.size()];
    // Refer to the comment on the declaration of regionFavoredNodesMap on why
    // it is a map of region name to Address[]
    for (int i = 0; i < favoredNodes.size(); i++) {
      addr[i] =
        Address.fromParts(favoredNodes.get(i).getHostName(), favoredNodes.get(i).getPort());
    }
    regionFavoredNodesMap.put(encodedRegionName, addr);
  }

  /**
   * Return the favored nodes for a region given its encoded name. Look at the comment around
   * {@link #regionFavoredNodesMap} on why we convert to InetSocketAddress[] here.
   * @param encodedRegionName the encoded region name.
   * @return array of favored locations
   */
  @Override
  public InetSocketAddress[] getFavoredNodesForRegion(String encodedRegionName) {
    return Address.toSocketAddress(regionFavoredNodesMap.get(encodedRegionName));
  }

  @Override
  public ServerNonceManager getNonceManager() {
    return this.nonceManager;
  }

  private static class MovedRegionInfo {
    private final ServerName serverName;
    private final long seqNum;

    MovedRegionInfo(ServerName serverName, long closeSeqNum) {
      this.serverName = serverName;
      this.seqNum = closeSeqNum;
    }

    public ServerName getServerName() {
      return serverName;
    }

    public long getSeqNum() {
      return seqNum;
    }
  }

  /**
   * We need a timeout. If not, there is a risk of giving out wrong information: this would double
   * the number of network calls instead of reducing them.
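   * <p>
   * For example, with this two minute TTL a client that still caches this server as the location
   * gets a RegionMovedException (see getRegionByEncodedName above) pointing at the destination
   * for at most two minutes; once the entry expires the server answers with
   * NotServingRegionException and the client falls back to a meta lookup.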
   */
  private static final int TIMEOUT_REGION_MOVED = (2 * 60 * 1000);

  private void addToMovedRegions(String encodedName, ServerName destination, long closeSeqNum,
    boolean selfMove) {
    if (selfMove) {
      LOG.warn("Not adding moved region record: " + encodedName + " to self.");
      return;
    }
    LOG.info("Adding " + encodedName + " move to " + destination + " record at close sequenceid="
      + closeSeqNum);
    movedRegionInfoCache.put(encodedName, new MovedRegionInfo(destination, closeSeqNum));
  }

  void removeFromMovedRegions(String encodedName) {
    movedRegionInfoCache.invalidate(encodedName);
  }

  @InterfaceAudience.Private
  public MovedRegionInfo getMovedRegion(String encodedRegionName) {
    return movedRegionInfoCache.getIfPresent(encodedRegionName);
  }

  @InterfaceAudience.Private
  public int movedRegionCacheExpiredTime() {
    return TIMEOUT_REGION_MOVED;
  }

  private String getMyEphemeralNodePath() {
    return zooKeeper.getZNodePaths().getRsPath(serverName);
  }

  private boolean isHealthCheckerConfigured() {
    String healthScriptLocation = this.conf.get(HConstants.HEALTH_SCRIPT_LOC);
    return org.apache.commons.lang3.StringUtils.isNotBlank(healthScriptLocation);
  }

  /** Returns the underlying {@link CompactSplit} for this server */
  public CompactSplit getCompactSplitThread() {
    return this.compactSplitThread;
  }

  CoprocessorServiceResponse execRegionServerService(
    @SuppressWarnings("UnusedParameters") final RpcController controller,
    final CoprocessorServiceRequest serviceRequest) throws ServiceException {
    try {
      ServerRpcController serviceController = new ServerRpcController();
      CoprocessorServiceCall call = serviceRequest.getCall();
      String serviceName = call.getServiceName();
      Service service = coprocessorServiceHandlers.get(serviceName);
      if (service == null) {
        throw new UnknownProtocolException(null,
          "No registered coprocessor service found for " + serviceName);
      }
      ServiceDescriptor serviceDesc = service.getDescriptorForType();

      String methodName = call.getMethodName();
      MethodDescriptor methodDesc = serviceDesc.findMethodByName(methodName);
      if (methodDesc == null) {
        throw new UnknownProtocolException(service.getClass(),
          "Unknown method " + methodName + " called on service " + serviceName);
      }

      Message request = CoprocessorRpcUtils.getRequest(service, methodDesc, call.getRequest());
      final Message.Builder responseBuilder =
        service.getResponsePrototype(methodDesc).newBuilderForType();
      service.callMethod(methodDesc, serviceController, request, message -> {
        if (message != null) {
          responseBuilder.mergeFrom(message);
        }
      });
      IOException exception = CoprocessorRpcUtils.getControllerException(serviceController);
      if (exception != null) {
        throw exception;
      }
      return CoprocessorRpcUtils.getResponse(responseBuilder.build(), HConstants.EMPTY_BYTE_ARRAY);
    } catch (IOException ie) {
      throw new ServiceException(ie);
    }
  }

  /**
   * May be empty if this is a master process, which does not carry tables.
   * @return The block cache instance used by the regionserver.
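   *         <p>
   *         Since an {@link Optional} is returned, callers can default safely. A sketch, where
   *         {@code server} is a caller-held reference to this region server and
   *         {@code getBlockCount()} is part of the {@link BlockCache} interface:
   *
   *         <pre>{@code
   *         long blocks = server.getBlockCache().map(BlockCache::getBlockCount).orElse(0L);
   *         }</pre>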
   */
  @Override
  public Optional<BlockCache> getBlockCache() {
    return Optional.ofNullable(this.blockCache);
  }

  /**
   * May be empty if this is a master process, which does not carry tables.
   * @return The cache for mob files used by the regionserver.
   */
  @Override
  public Optional<MobFileCache> getMobFileCache() {
    return Optional.ofNullable(this.mobFileCache);
  }

  CacheEvictionStats clearRegionBlockCache(Region region) {
    long evictedBlocks = 0;

    for (Store store : region.getStores()) {
      for (StoreFile hFile : store.getStorefiles()) {
        evictedBlocks += blockCache.evictBlocksByHfileName(hFile.getPath().getName());
      }
    }

    return CacheEvictionStats.builder().withEvictedBlocks(evictedBlocks).build();
  }

  @Override
  public double getCompactionPressure() {
    double max = 0;
    for (Region region : onlineRegions.values()) {
      for (Store store : region.getStores()) {
        double normCount = store.getCompactionPressure();
        if (normCount > max) {
          max = normCount;
        }
      }
    }
    return max;
  }

  @Override
  public HeapMemoryManager getHeapMemoryManager() {
    return hMemManager;
  }

  public MemStoreFlusher getMemStoreFlusher() {
    return cacheFlusher;
  }

  /**
   * For testing.
   * @return whether all wal roll requests have finished for this regionserver
   */
  @InterfaceAudience.Private
  public boolean walRollRequestFinished() {
    return this.walRoller.walRollFinished();
  }

  @Override
  public ThroughputController getFlushThroughputController() {
    return flushThroughputController;
  }

  @Override
  public double getFlushPressure() {
    if (getRegionServerAccounting() == null || cacheFlusher == null) {
      // return 0 during RS initialization
      return 0.0;
    }
    return getRegionServerAccounting().getFlushPressure();
  }

  @Override
  public void onConfigurationChange(Configuration newConf) {
    ThroughputController old = this.flushThroughputController;
    if (old != null) {
      old.stop("configuration change");
    }
    this.flushThroughputController = FlushThroughputControllerFactory.create(this, newConf);
    try {
      Superusers.initialize(newConf);
    } catch (IOException e) {
      LOG.warn("Failed to initialize SuperUsers on reloading of the configuration");
    }

    // update the region server coprocessor host if the configuration has changed.
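    // (for example, an operator changing hbase.coprocessor.regionserver.classes and pushing an
    // online configuration reload ends up rebuilding the coprocessor host below)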
    if (
      CoprocessorConfigurationUtil.checkConfigurationChange(this.rsHost, newConf,
        CoprocessorHost.REGIONSERVER_COPROCESSOR_CONF_KEY)
    ) {
      LOG.info("Update region server coprocessors because the configuration has changed");
      this.rsHost = new RegionServerCoprocessorHost(this, newConf);
    }
  }

  @Override
  public MetricsRegionServer getMetrics() {
    return metricsRegionServer;
  }

  @Override
  public SecureBulkLoadManager getSecureBulkLoadManager() {
    return this.secureBulkLoadManager;
  }

  @Override
  public EntityLock regionLock(final List<RegionInfo> regionInfo, final String description,
    final Abortable abort) {
    final LockServiceClient client =
      new LockServiceClient(conf, lockStub, asyncClusterConnection.getNonceGenerator());
    return client.regionLock(regionInfo, description, abort);
  }

  @Override
  public void unassign(byte[] regionName) throws IOException {
    FutureUtils.get(asyncClusterConnection.getAdmin().unassign(regionName, false));
  }

  @Override
  public RegionServerSpaceQuotaManager getRegionServerSpaceQuotaManager() {
    return this.rsSpaceQuotaManager;
  }

  @Override
  public boolean reportFileArchivalForQuotas(TableName tableName,
    Collection<Entry<String, Long>> archivedFiles) {
    if (TEST_SKIP_REPORTING_TRANSITION) {
      return false;
    }
    RegionServerStatusService.BlockingInterface rss = rssStub;
    if (rss == null || rsSpaceQuotaManager == null) {
      // the current server could be stopping.
      LOG.trace("Skipping file archival reporting to HMaster as stub is null");
      return false;
    }
    try {
      RegionServerStatusProtos.FileArchiveNotificationRequest request =
        rsSpaceQuotaManager.buildFileArchiveRequest(tableName, archivedFiles);
      rss.reportFileArchival(null, request);
    } catch (ServiceException se) {
      IOException ioe = ProtobufUtil.getRemoteException(se);
      if (ioe instanceof PleaseHoldException) {
        if (LOG.isTraceEnabled()) {
          LOG.trace("Failed to report file archival(s) to Master because it is initializing."
            + " This will be retried.", ioe);
        }
        // The Master is coming up. Will retry the report later. Avoid re-creating the stub.
        return false;
      }
      if (rssStub == rss) {
        rssStub = null;
      }
      // re-create the stub if we failed to report the archival
      createRegionServerStatusStub(true);
      LOG.debug("Failed to report file archival(s) to Master. This will be retried.", ioe);
      return false;
    }
    return true;
  }

  void executeProcedure(long procId, long initiatingMasterActiveTime,
    RSProcedureCallable callable) {
    executorService
      .submit(new RSProcedureHandler(this, procId, initiatingMasterActiveTime, callable));
  }

  public void remoteProcedureComplete(long procId, long initiatingMasterActiveTime, Throwable error,
    byte[] procResultData) {
    procedureResultReporter.complete(procId, initiatingMasterActiveTime, error, procResultData);
  }

  void reportProcedureDone(ReportProcedureDoneRequest request) throws IOException {
    RegionServerStatusService.BlockingInterface rss;
    // TODO: juggling class state with an instance variable, outside of a synchronized block :'(
    for (;;) {
      rss = rssStub;
      if (rss != null) {
        break;
      }
      createRegionServerStatusStub();
    }
    try {
      rss.reportProcedureDone(null, request);
    } catch (ServiceException se) {
      if (rssStub == rss) {
        rssStub = null;
      }
      throw ProtobufUtil.getRemoteException(se);
    }
  }

  /**
   * Will ignore the open/close region procedures which were already submitted or executed. When
   * the master had an unfinished open/close region procedure and restarted, the new active master
   * may send a duplicate open/close region request to the regionserver. The open/close request is
   * submitted to a thread pool and executed, so we first need a cache of the submitted open/close
   * region procedures. After an open/close region request has executed and the region transition
   * has been reported successfully, we cache it in the executed region procedures cache; see
   * {@link #finishRegionProcedure(long)}. After the region transition is reported successfully,
   * the master will not send the open/close region request to the regionserver again. We assume
   * an ongoing duplicate open/close region request is not delayed by more than 600 seconds, so
   * the executed region procedures cache expires after 600 seconds. See HBASE-22404 for more
   * details.
   * @param procId the id of the open/close region procedure
   * @return true if the procedure can be submitted.
   */
  boolean submitRegionProcedure(long procId) {
    if (procId == -1) {
      return true;
    }
    // Ignore the region procedures which were already submitted.
    Long previous = submittedRegionProcedures.putIfAbsent(procId, procId);
    if (previous != null) {
      LOG.warn("Received procedure pid={}, which was already submitted, just ignore it", procId);
      return false;
    }
    // Ignore the region procedures which were already executed.
    if (executedRegionProcedures.getIfPresent(procId) != null) {
      LOG.warn("Received procedure pid={}, which was already executed, just ignore it", procId);
      return false;
    }
    return true;
  }

  /**
   * See {@link #submitRegionProcedure(long)}.
   * @param procId the id of the open/close region procedure
   */
  public void finishRegionProcedure(long procId) {
    executedRegionProcedures.put(procId, procId);
    submittedRegionProcedures.remove(procId);
  }

  /**
   * Forcibly terminate the region server when an abort times out.
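   * <p>
   * A scheduling sketch (the timer name and {@code abortTimeoutMs} value are illustrative
   * assumptions here, not the exact wiring used by the abort path):
   *
   * <pre>{@code
   * // halt(1) in run() skips shutdown hooks entirely, so schedule this only as a last resort
   * new Timer("abort-timeout", true).schedule(new SystemExitWhenAbortTimeout(), abortTimeoutMs);
   * }</pre>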
   */
  private static class SystemExitWhenAbortTimeout extends TimerTask {

    public SystemExitWhenAbortTimeout() {
    }

    @Override
    public void run() {
      LOG.warn("Aborting region server timed out; terminating forcibly without waiting for any"
        + " running shutdown hooks or finalizers to finish their work. Thread dump to stdout.");
      Threads.printThreadInfo(System.out, "Zombie HRegionServer");
      Runtime.getRuntime().halt(1);
    }
  }

  @InterfaceAudience.Private
  public CompactedHFilesDischarger getCompactedHFilesDischarger() {
    return compactedFileDischarger;
  }

  /**
   * Return the pause time configured in
   * {@link HConstants#HBASE_RPC_SHORTOPERATION_RETRY_PAUSE_TIME}.
   * @return pause time
   */
  @InterfaceAudience.Private
  public long getRetryPauseTime() {
    return this.retryPauseTime;
  }

  @Override
  public Optional<ServerName> getActiveMaster() {
    return Optional.ofNullable(masterAddressTracker.getMasterAddress());
  }

  @Override
  public List<ServerName> getBackupMasters() {
    return masterAddressTracker.getBackupMasters();
  }

  @Override
  public Iterator<ServerName> getBootstrapNodes() {
    return bootstrapNodeManager.getBootstrapNodes().iterator();
  }

  @Override
  public List<HRegionLocation> getMetaLocations() {
    return metaRegionLocationCache.getMetaRegionLocations();
  }

  @Override
  protected NamedQueueRecorder createNamedQueueRecord() {
    return NamedQueueRecorder.getInstance(conf);
  }

  @Override
  protected boolean clusterMode() {
    // this method will be called in the constructor of the super class, so we cannot return
    // masterless directly here, as it will always be false at that point.
    return !conf.getBoolean(MASTERLESS_CONFIG_NAME, false);
  }

  @InterfaceAudience.Private
  public BrokenStoreFileCleaner getBrokenStoreFileCleaner() {
    return brokenStoreFileCleaner;
  }

  @InterfaceAudience.Private
  public RSMobFileCleanerChore getRSMobFileCleanerChore() {
    return rsMobFileCleanerChore;
  }

  RSSnapshotVerifier getRsSnapshotVerifier() {
    return rsSnapshotVerifier;
  }

  @Override
  protected void stopChores() {
    shutdownChore(nonceManagerChore);
    shutdownChore(compactionChecker);
    shutdownChore(compactedFileDischarger);
    shutdownChore(periodicFlusher);
    shutdownChore(healthCheckChore);
    shutdownChore(executorStatusChore);
    shutdownChore(storefileRefresher);
    shutdownChore(fsUtilizationChore);
    shutdownChore(namedQueueServiceChore);
    shutdownChore(brokenStoreFileCleaner);
    shutdownChore(rsMobFileCleanerChore);
    shutdownChore(replicationMarkerChore);
  }

  @Override
  public RegionReplicationBufferManager getRegionReplicationBufferManager() {
    return regionReplicationBufferManager;
  }
}