/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.master;

import java.io.IOException;
import java.io.InterruptedIOException;
import java.util.HashSet;
import java.util.Iterator;
import java.util.List;
import java.util.Set;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.stream.Collectors;
import org.apache.hadoop.hbase.ServerMetrics;
import org.apache.hadoop.hbase.ServerMetricsBuilder;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.client.VersionInfoUtil;
import org.apache.hadoop.hbase.util.Pair;
import org.apache.hadoop.hbase.zookeeper.ZKListener;
import org.apache.hadoop.hbase.zookeeper.ZKUtil;
import org.apache.hadoop.hbase.zookeeper.ZKWatcher;
import org.apache.hadoop.hbase.zookeeper.ZNodePaths;
import org.apache.yetus.audience.InterfaceAudience;
import org.apache.zookeeper.KeeperException;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import org.apache.hbase.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder;

import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionServerInfo;

/**
 * Tracks the online region servers via ZK.
 * <p/>
 * Handling of new RSs checking in is done via RPC. This class is only responsible for watching for
 * expired nodes. It handles listening for changes in the RS node list. The only exception is when
 * the master restarts: then we will use the list fetched from zk to construct the initial set of
 * live region servers.
 * <p/>
 * If an RS node gets deleted, this automatically handles calling of
 * {@link ServerManager#expireServer(ServerName)}.
 */
@InterfaceAudience.Private
public class RegionServerTracker extends ZKListener {
  private static final Logger LOG = LoggerFactory.getLogger(RegionServerTracker.class);
  private final Set<ServerName> regionServers = new HashSet<>();
  private final ServerManager serverManager;
  private final MasterServices server;
  // As we need to send requests to zk when processing the nodeChildrenChanged event, we'd better
  // move the operation to a single threaded thread pool in order to not block the zk event
  // processing, since all the zk listeners across HMaster will be called in one thread
  // sequentially.
  private final ExecutorService executor;
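
  /**
   * Note that constructing the tracker does not start tracking; {@link #start(Set, Set, Set)} must
   * be called to register the ZK listener and build the initial set of live region servers.
   */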
  public RegionServerTracker(ZKWatcher watcher, MasterServices server,
      ServerManager serverManager) {
    super(watcher);
    this.server = server;
    this.serverManager = serverManager;
    this.executor = Executors.newSingleThreadExecutor(
      new ThreadFactoryBuilder().setDaemon(true).setNameFormat("RegionServerTracker-%d").build());
  }

  private Pair<ServerName, RegionServerInfo> getServerInfo(String name)
      throws KeeperException, IOException {
    ServerName serverName = ServerName.parseServerName(name);
    String nodePath = ZNodePaths.joinZNode(watcher.getZNodePaths().rsZNode, name);
    byte[] data;
    try {
      data = ZKUtil.getData(watcher, nodePath);
    } catch (InterruptedException e) {
      throw (InterruptedIOException) new InterruptedIOException().initCause(e);
    }
    if (data == null) {
      // we should receive a children changed event later and then we will expire it, so we still
      // need to add it to the region server set.
      LOG.warn("Server node {} does not exist, already dead?", name);
      return Pair.newPair(serverName, null);
    }
    if (data.length == 0 || !ProtobufUtil.isPBMagicPrefix(data)) {
      // this should not happen actually, unless we have bugs or someone has messed zk up.
      LOG.warn("Invalid data for region server node {} on zookeeper, data length = {}", name,
        data.length);
      return Pair.newPair(serverName, null);
    }
    RegionServerInfo.Builder builder = RegionServerInfo.newBuilder();
    int magicLen = ProtobufUtil.lengthOfPBMagic();
    ProtobufUtil.mergeFrom(builder, data, magicLen, data.length - magicLen);
    return Pair.newPair(serverName, builder.build());
  }

  /**
   * Starts the tracking of online RegionServers. All RSes will be tracked after this method is
   * called.
   * <p/>
   * In this method, we will also construct the region server sets in {@link ServerManager}. If a
   * region server is dead between the crash of the previous master instance and the start of the
   * current master instance, we will schedule an SCP for it. This is done in
   * {@link ServerManager#findOutDeadServersAndProcess(Set, Set)}; we call it here under lock
   * protection to prevent concurrency issues with the server expiration operation.
   * @param deadServersFromPE the region servers which already have SCPs associated.
   * @param liveServersFromWALDir the live region servers from the wal directory.
   * @param splittingServersFromWALDir servers whose WALs are being actively 'split'.
   */
  public void start(Set<ServerName> deadServersFromPE, Set<ServerName> liveServersFromWALDir,
      Set<ServerName> splittingServersFromWALDir)
      throws KeeperException, IOException {
    LOG.info("Starting RegionServerTracker; {} have existing ServerCrashProcedures, {} " +
      "possibly 'live' servers, and {} 'splitting'.", deadServersFromPE.size(),
      liveServersFromWALDir.size(), splittingServersFromWALDir.size());
    // deadServersFromPE is made from a list of outstanding ServerCrashProcedures.
    // splittingServersFromWALDir are being actively split -- the directory in the FS ends in
    // '-SPLITTING'. Each splitting server should have a corresponding SCP. Log if not.
    splittingServersFromWALDir.stream().filter(s -> !deadServersFromPE.contains(s))
      .forEach(s -> LOG.error("{} has no matching ServerCrashProcedure", s));
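    // Register the listener before listing the current children below, so that changes to the rs
    // znode made while we are building the initial server set are still delivered to this tracker.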
    watcher.registerListener(this);
    synchronized (this) {
      List<String> servers =
        ZKUtil.listChildrenAndWatchForNewChildren(watcher, watcher.getZNodePaths().rsZNode);
      for (String n : servers) {
        Pair<ServerName, RegionServerInfo> pair = getServerInfo(n);
        ServerName serverName = pair.getFirst();
        RegionServerInfo info = pair.getSecond();
        regionServers.add(serverName);
        ServerMetrics serverMetrics = info != null
          ? ServerMetricsBuilder.of(serverName,
            VersionInfoUtil.getVersionNumber(info.getVersionInfo()))
          : ServerMetricsBuilder.of(serverName);
        serverManager.checkAndRecordNewServer(serverName, serverMetrics);
      }
      serverManager.findOutDeadServersAndProcess(deadServersFromPE, liveServersFromWALDir);
    }
  }

  public void stop() {
    executor.shutdownNow();
  }

  private synchronized void refresh() {
    List<String> names;
    try {
      names = ZKUtil.listChildrenAndWatchForNewChildren(watcher, watcher.getZNodePaths().rsZNode);
    } catch (KeeperException e) {
      // here we need to abort as we failed to set the watcher on the rs node, which means that we
      // can no longer track node deleted events.
      server.abort("Unexpected zk exception getting RS nodes", e);
      return;
    }
    Set<ServerName> servers =
      names.stream().map(ServerName::parseServerName).collect(Collectors.toSet());
    for (Iterator<ServerName> iter = regionServers.iterator(); iter.hasNext();) {
      ServerName sn = iter.next();
      if (!servers.contains(sn)) {
        LOG.info("RegionServer ephemeral node deleted, processing expiration [{}]", sn);
        serverManager.expireServer(sn);
        iter.remove();
      }
    }
    // here we do not need to parse the region server info as it is useless now; we only need the
    // server name.
    boolean newServerAdded = false;
    for (ServerName sn : servers) {
      if (regionServers.add(sn)) {
        newServerAdded = true;
        LOG.info("RegionServer ephemeral node created, adding [{}]", sn);
      }
    }
    if (newServerAdded && server.isInitialized()) {
      // Only call the check to move servers if a RegionServer was added to the cluster; in this
      // case it could be a server with a new version so it makes sense to run the check.
      server.checkIfShouldMoveSystemRegionAsync();
    }
  }

  @Override
  public void nodeChildrenChanged(String path) {
    if (path.equals(watcher.getZNodePaths().rsZNode) && !server.isAborted() &&
      !server.isStopped()) {
      executor.execute(this::refresh);
    }
  }
}