1   /**
2    * Licensed to the Apache Software Foundation (ASF) under one
3    * or more contributor license agreements.  See the NOTICE file
4    * distributed with this work for additional information
5    * regarding copyright ownership.  The ASF licenses this file
6    * to you under the Apache License, Version 2.0 (the
7    * "License"); you may not use this file except in compliance
8    * with the License.  You may obtain a copy of the License at
9    *
10   *     http://www.apache.org/licenses/LICENSE-2.0
11   *
12   * Unless required by applicable law or agreed to in writing, software
13   * distributed under the License is distributed on an "AS IS" BASIS,
14   * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
15   * See the License for the specific language governing permissions and
16   * limitations under the License.
17   */
18  package org.apache.hadoop.hbase.master;
19  
20  import java.io.IOException;
21  import java.util.ArrayList;
22  import java.util.Collection;
23  import java.util.Collections;
24  import java.util.HashMap;
25  import java.util.HashSet;
26  import java.util.Iterator;
27  import java.util.List;
28  import java.util.Map;
29  import java.util.Set;
30  import java.util.TreeMap;
31  
32  import com.google.common.annotations.VisibleForTesting;
33  import com.google.common.base.Preconditions;
34  
35  import org.apache.commons.logging.Log;
36  import org.apache.commons.logging.LogFactory;
37  import org.apache.hadoop.hbase.classification.InterfaceAudience;
38  import org.apache.hadoop.conf.Configuration;
39  import org.apache.hadoop.hbase.HConstants;
40  import org.apache.hadoop.hbase.HRegionInfo;
41  import org.apache.hadoop.hbase.HTableDescriptor;
42  import org.apache.hadoop.hbase.MetaTableAccessor;
43  import org.apache.hadoop.hbase.ServerLoad;
44  import org.apache.hadoop.hbase.ServerName;
45  import org.apache.hadoop.hbase.TableName;
46  import org.apache.hadoop.hbase.client.RegionReplicaUtil;
47  import org.apache.hadoop.hbase.master.RegionState.State;
48  import org.apache.hadoop.hbase.client.TableState;
49  import org.apache.hadoop.hbase.util.Bytes;
50  import org.apache.hadoop.hbase.util.FSUtils;
51  import org.apache.hadoop.hbase.util.Pair;
52  
53  /**
54   * Region state accountant. It holds the states of all regions in memory.
55   * In a normal scenario, it should match the meta table and the true region states.
56   *
57   * This map is used by the AssignmentManager to track region states.
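     *
     * <p>An illustrative lifecycle sketch (the caller and variable names are
     * hypothetical; in practice the AssignmentManager drives these calls):
     * <pre>
     *   regionStates.createRegionState(hri);                         // registered as OFFLINE
     *   regionStates.updateRegionState(hri, State.PENDING_OPEN, sn); // now in transition
     *   regionStates.regionOnline(hri, sn, openSeqNum);              // opened on sn
     *   regionStates.regionOffline(hri);                             // closed and offline again
     * </pre>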
58   */
59  @InterfaceAudience.Private
60  public class RegionStates {
61    private static final Log LOG = LogFactory.getLog(RegionStates.class);
62  
63    /**
64     * Regions currently in transition.
65     */
66    final HashMap<String, RegionState> regionsInTransition =
67      new HashMap<String, RegionState>();
68  
69    /**
70     * Region encoded name to state map.
71     * All the regions should be in this map.
72     */
73    private final Map<String, RegionState> regionStates =
74      new HashMap<String, RegionState>();
75  
76    /**
77    * Holds the mapping of table -> its regions' states, keyed by encoded region name.
78     */
79    private final Map<TableName, Map<String, RegionState>> regionStatesTableIndex =
80        new HashMap<TableName, Map<String, RegionState>>();
81  
82    /**
83     * Server to regions assignment map.
84     * Contains the set of regions currently assigned to a given server.
85     */
86    private final Map<ServerName, Set<HRegionInfo>> serverHoldings =
87      new HashMap<ServerName, Set<HRegionInfo>>();
88  
89    /**
90    * Maintains the mapping from the default region replica to all replicas (including the default) of that region.
91     */
92    private final Map<HRegionInfo, Set<HRegionInfo>> defaultReplicaToOtherReplicas =
93      new HashMap<HRegionInfo, Set<HRegionInfo>>();
94  
95    /**
96     * Region to server assignment map.
97     * Contains the server a given region is currently assigned to.
98     */
99    private final TreeMap<HRegionInfo, ServerName> regionAssignments =
100     new TreeMap<HRegionInfo, ServerName>();
101 
102   /**
103    * Encoded region name to server assignment map, used for re-assignment
104    * decisions. Contains the server a given region was last known to be assigned
105    * to; if that server has not completed log splitting, the region is not assignable.
106    * If a region is currently assigned, the server info in this
107    * map should be the same as that in regionAssignments.
108    * However, the entry in regionAssignments is cleared when the region
109    * goes offline, while the entry in lastAssignments is cleared when
110    * the region is closed or the server is dead and has been processed.
111    */
112   private final HashMap<String, ServerName> lastAssignments =
113     new HashMap<String, ServerName>();
114 
115   /**
116    * Encoded region name to server assignment map, used to
117    * clean up serverHoldings when a region comes online
118    * on a new server. When the region goes offline from the previous
119    * server, we clean up regionAssignments so that it holds the
120    * latest assignment map. But we don't clean up serverHoldings
121    * to match the meta. We need this map to find the old server
122    * whose serverHoldings entry needs cleanup, given a moved region.
123    */
124   private final HashMap<String, ServerName> oldAssignments =
125     new HashMap<String, ServerName>();
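      // An illustrative walk-through (hypothetical servers A and B): while a region is
      // open on A, regionAssignments, lastAssignments and serverHoldings all point at A.
      // If A dies, the regionAssignments entry goes away once the region is offlined,
      // while lastAssignments keeps A until A's WALs are split (see logSplit below).
      // When the region later opens on B, oldAssignments is consulted to purge the
      // stale entry for A from serverHoldings.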
126 
127   /**
128    * Maps a host:port string to the latest start code
129    * of a region server which is known to be dead. It is dead
130    * to us, but the server manager may not know it yet.
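       *
       * An illustrative (hypothetical) entry: {@code "rs1.example.com:16020" -> 1423081600000}.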
131    */
132   private final HashMap<String, Long> deadServers =
133     new HashMap<String, Long>();
134 
135   /**
136    * Maps a dead server to the time when its log splitting is done.
137    * Since log splitting is not ordered, we have to remember
138    * all processed instances. The map is cleaned up based
139    * on a configured time. By default, we assume a dead
140    * server should be done with log splitting within two hours.
141    */
142   private final HashMap<ServerName, Long> processedServers =
143     new HashMap<ServerName, Long>();
144   private long lastProcessedServerCleanTime;
145 
146   private final TableStateManager tableStateManager;
147   private final RegionStateStore regionStateStore;
148   private final ServerManager serverManager;
149   private final MasterServices server;
150 
151   // The maximum time to keep log split info in the region states map
152   static final String LOG_SPLIT_TIME = "hbase.master.maximum.logsplit.keeptime";
153   static final long DEFAULT_LOG_SPLIT_TIME = 7200000L; // 2 hours
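      // An illustrative override (hypothetical value) via hbase-site.xml, keeping processed
      // dead-server entries for one hour instead of the two-hour default (milliseconds):
      //
      //   <property>
      //     <name>hbase.master.maximum.logsplit.keeptime</name>
      //     <value>3600000</value>
      //   </property>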
154 
155   RegionStates(final MasterServices master, final TableStateManager tableStateManager,
156       final ServerManager serverManager, final RegionStateStore regionStateStore) {
157     this.tableStateManager = tableStateManager;
158     this.regionStateStore = regionStateStore;
159     this.serverManager = serverManager;
160     this.server = master;
161   }
162 
163   /**
164    * @return an unmodifiable view of the region assignment map
165    */
166   public synchronized Map<HRegionInfo, ServerName> getRegionAssignments() {
167     return Collections.unmodifiableMap(regionAssignments);
168   }
169 
170   /**
171    * Return the replicas (including the default) of the given regions, grouped by ServerName
172    * @param regions the regions whose replica assignments are wanted
173    * @return the groupings as a map of ServerName to the replicas hosted there
174    */
175   synchronized Map<ServerName, List<HRegionInfo>> getRegionAssignments(
176     Collection<HRegionInfo> regions) {
177     Map<ServerName, List<HRegionInfo>> map = new HashMap<ServerName, List<HRegionInfo>>();
178     for (HRegionInfo region : regions) {
179       HRegionInfo defaultReplica = RegionReplicaUtil.getRegionInfoForDefaultReplica(region);
180       Set<HRegionInfo> allReplicas = defaultReplicaToOtherReplicas.get(defaultReplica);
181       if (allReplicas != null) {
182         for (HRegionInfo hri : allReplicas) {
183           ServerName server = regionAssignments.get(hri);
184           if (server != null) {
185             List<HRegionInfo> regionsOnServer = map.get(server);
186             if (regionsOnServer == null) {
187               regionsOnServer = new ArrayList<HRegionInfo>(1);
188               map.put(server, regionsOnServer);
189             }
190             regionsOnServer.add(hri);
191           }
192         }
193       }
194     }
195     return map;
196   }
197 
198   public synchronized ServerName getRegionServerOfRegion(HRegionInfo hri) {
199     return regionAssignments.get(hri);
200   }
201 
202   /**
203    * Get regions in transition and their states
204    */
205   @SuppressWarnings("unchecked")
206   public synchronized Map<String, RegionState> getRegionsInTransition() {
207     return (Map<String, RegionState>)regionsInTransition.clone();
208   }
209 
210   /**
211    * @return True if the specified region is in transition.
212    */
213   public synchronized boolean isRegionInTransition(final HRegionInfo hri) {
214     return regionsInTransition.containsKey(hri.getEncodedName());
215   }
216 
217   /**
218    * @return True if the specified region is in transition.
219    */
220   public synchronized boolean isRegionInTransition(final String encodedName) {
221     return regionsInTransition.containsKey(encodedName);
222   }
223 
224   /**
225    * @return True if any region is in transition.
226    */
227   public synchronized boolean isRegionsInTransition() {
228     return !regionsInTransition.isEmpty();
229   }
230 
231   /**
232    * @return True if the specified region is assigned and not in transition.
233    */
234   public synchronized boolean isRegionOnline(final HRegionInfo hri) {
235     return !isRegionInTransition(hri) && regionAssignments.containsKey(hri);
236   }
237 
238   /**
239    * @return True if the specified region is offline/closed but not in transition.
240    * If the region is not in the map, it is considered offline to us too.
241    */
242   public synchronized boolean isRegionOffline(final HRegionInfo hri) {
243     return getRegionState(hri) == null || (!isRegionInTransition(hri)
244       && isRegionInState(hri, State.OFFLINE, State.CLOSED));
245   }
246 
247   /**
248    * @return True if specified region is in one of the specified states.
249    */
250   public boolean isRegionInState(
251       final HRegionInfo hri, final State... states) {
252     return isRegionInState(hri.getEncodedName(), states);
253   }
254 
255   /**
256    * @return True if specified region is in one of the specified states.
257    */
258   public boolean isRegionInState(
259       final String encodedName, final State... states) {
260     RegionState regionState = getRegionState(encodedName);
261     return isOneOfStates(regionState, states);
262   }
263 
264   /**
265    * Wait for the state map to be updated by assignment manager.
266    */
267   public synchronized void waitForUpdate(
268       final long timeout) throws InterruptedException {
269     this.wait(timeout);
270   }
271 
272   /**
273    * Get region transition state
274    */
275   public RegionState getRegionTransitionState(final HRegionInfo hri) {
276     return getRegionTransitionState(hri.getEncodedName());
277   }
278 
279   /**
280    * Get region transition state
281    */
282   public synchronized RegionState
283       getRegionTransitionState(final String encodedName) {
284     return regionsInTransition.get(encodedName);
285   }
286 
287   /**
288    * Add a list of regions to RegionStates. If a region is split
289    * and offline, its state will be SPLIT. Otherwise, its state will
290    * be OFFLINE. Regions already in RegionStates will be skipped.
291    */
292   public void createRegionStates(
293       final List<HRegionInfo> hris) {
294     for (HRegionInfo hri: hris) {
295       createRegionState(hri);
296     }
297   }
298 
299   /**
300    * Add a region to RegionStates. If the region is split
301    * and offline, its state will be SPLIT. Otherwise, its state will
302    * be OFFLINE. If it is already in RegionStates, this call has
303    * no effect, and the original state is returned.
304    */
305   public RegionState createRegionState(final HRegionInfo hri) {
306     return createRegionState(hri, null, null, null);
307   }
308 
309   /**
310    * Add a region to RegionStates with the specified state.
311    * If the region is already in RegionStates, this call has
312    * no effect, and the original state is returned.
313    *
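       * <p>An illustrative call (variable names are hypothetical), e.g. while rebuilding
       * state for a region already known to be open on a server:
       * <pre>
       *   RegionState rs = regionStates.createRegionState(hri, State.OPEN, sn, sn);
       * </pre>
       *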
314    * @param hri the region info to create a state for
315    * @param newState the state to set the region to
316    * @param serverName the server the region is transitioning on
317    * @param lastHost the last server that hosted the region
318    * @return the current state
319    */
320   public synchronized RegionState createRegionState(final HRegionInfo hri,
321       State newState, ServerName serverName, ServerName lastHost) {
322     if (newState == null || (newState == State.OPEN && serverName == null)) {
323       newState =  State.OFFLINE;
324     }
325     if (hri.isOffline() && hri.isSplit()) {
326       newState = State.SPLIT;
327       serverName = null;
328     }
329     String encodedName = hri.getEncodedName();
330     RegionState regionState = regionStates.get(encodedName);
331     if (regionState != null) {
332       LOG.warn("Tried to create a state for a region already in RegionStates, "
333         + "used existing: " + regionState + ", ignored new: " + newState);
334     } else {
335       regionState = new RegionState(hri, newState, serverName);
336       putRegionState(regionState);
337       if (newState == State.OPEN) {
338         if (!serverName.equals(lastHost)) {
339           LOG.warn("Open region's last host " + lastHost
340             + " should be the same as the current one " + serverName
341             + ", ignored the last and used the current one");
342           lastHost = serverName;
343         }
344         lastAssignments.put(encodedName, lastHost);
345         regionAssignments.put(hri, lastHost);
346       } else if (!isOneOfStates(regionState, State.MERGED, State.SPLIT, State.OFFLINE)) {
347         regionsInTransition.put(encodedName, regionState);
348       }
349       if (lastHost != null && newState != State.SPLIT) {
350         addToServerHoldings(lastHost, hri);
351         if (newState != State.OPEN) {
352           oldAssignments.put(encodedName, lastHost);
353         }
354       }
355     }
356     return regionState;
357   }
358 
359   private RegionState putRegionState(RegionState regionState) {
360     HRegionInfo hri = regionState.getRegion();
361     String encodedName = hri.getEncodedName();
362     TableName table = hri.getTable();
363     RegionState oldState = regionStates.put(encodedName, regionState);
364     Map<String, RegionState> map = regionStatesTableIndex.get(table);
365     if (map == null) {
366       map = new HashMap<String, RegionState>();
367       regionStatesTableIndex.put(table, map);
368     }
369     map.put(encodedName, regionState);
370     return oldState;
371   }
372 
373   /**
374    * Update a region state. It will be put in transition if not already there.
375    */
376   public RegionState updateRegionState(
377       final HRegionInfo hri, final State state) {
378     RegionState regionState = getRegionState(hri.getEncodedName());
379     return updateRegionState(hri, state,
380       regionState == null ? null : regionState.getServerName());
381   }
382 
383   /**
384    * Update a region state. It will be put in transition if not already there.
385    */
386   public RegionState updateRegionState(
387       final HRegionInfo hri, final State state, final ServerName serverName) {
388     return updateRegionState(hri, state, serverName, HConstants.NO_SEQNUM);
389   }
390 
391   public void regionOnline(
392       final HRegionInfo hri, final ServerName serverName) {
393     regionOnline(hri, serverName, HConstants.NO_SEQNUM);
394   }
395 
396   /**
397    * A region is online; it won't be in transition any more.
398    * We can't confirm it is really online on the specified region server
399    * because it hasn't been put in the region server's online region list yet.
400    */
401   public void regionOnline(final HRegionInfo hri,
402       final ServerName serverName, long openSeqNum) {
403     String encodedName = hri.getEncodedName();
404     if (!serverManager.isServerOnline(serverName)) {
405       // This is possible if the region server dies before the master gets a
406       // chance to handle the ZK event in time. At this time, if the dead server
407       // is already processed by SSH, we should ignore this event.
408       // If not processed yet, ignore and let SSH deal with it.
409       LOG.warn("Ignored, " + encodedName
410         + " was opened on a dead server: " + serverName);
411       return;
412     }
413     updateRegionState(hri, State.OPEN, serverName, openSeqNum);
414 
415     synchronized (this) {
416       regionsInTransition.remove(encodedName);
417       ServerName oldServerName = regionAssignments.put(hri, serverName);
418       if (!serverName.equals(oldServerName)) {
419         if (LOG.isDebugEnabled()) {
420           LOG.debug("Onlined " + hri.getShortNameToLog() + " on " + serverName + " " + hri);
421         } else {
422           LOG.debug("Onlined " + hri.getShortNameToLog() + " on " + serverName);
423         }
424         addToServerHoldings(serverName, hri);
425         addToReplicaMapping(hri);
426         if (oldServerName == null) {
427           oldServerName = oldAssignments.remove(encodedName);
428         }
429         if (oldServerName != null && serverHoldings.containsKey(oldServerName)) {
430           LOG.info("Offlined " + hri.getShortNameToLog() + " from " + oldServerName);
431           removeFromServerHoldings(oldServerName, hri);
432         }
433       }
434     }
435   }
436 
437   private void addToServerHoldings(ServerName serverName, HRegionInfo hri) {
438     Set<HRegionInfo> regions = serverHoldings.get(serverName);
439     if (regions == null) {
440       regions = new HashSet<HRegionInfo>();
441       serverHoldings.put(serverName, regions);
442     }
443     regions.add(hri);
444   }
445 
446   private void addToReplicaMapping(HRegionInfo hri) {
447     HRegionInfo defaultReplica = RegionReplicaUtil.getRegionInfoForDefaultReplica(hri);
448     Set<HRegionInfo> replicas =
449         defaultReplicaToOtherReplicas.get(defaultReplica);
450     if (replicas == null) {
451       replicas = new HashSet<HRegionInfo>();
452       defaultReplicaToOtherReplicas.put(defaultReplica, replicas);
453     }
454     replicas.add(hri);
455   }
456 
457   private void removeFromServerHoldings(ServerName serverName, HRegionInfo hri) {
458     Set<HRegionInfo> oldRegions = serverHoldings.get(serverName);
459     oldRegions.remove(hri);
460     if (oldRegions.isEmpty()) {
461       serverHoldings.remove(serverName);
462     }
463   }
464 
465   private void removeFromReplicaMapping(HRegionInfo hri) {
466     HRegionInfo defaultReplica = RegionReplicaUtil.getRegionInfoForDefaultReplica(hri);
467     Set<HRegionInfo> replicas = defaultReplicaToOtherReplicas.get(defaultReplica);
468     if (replicas != null) {
469       replicas.remove(hri);
470       if (replicas.isEmpty()) {
471         defaultReplicaToOtherReplicas.remove(defaultReplica);
472       }
473     }
474   }
475 
476   /**
477    * A dead server's WALs have been split, so all the regions that
478    * used to be open on it can be safely assigned now. Mark them assignable.
479    */
480   public synchronized void logSplit(final ServerName serverName) {
481     for (Iterator<Map.Entry<String, ServerName>> it
482         = lastAssignments.entrySet().iterator(); it.hasNext();) {
483       Map.Entry<String, ServerName> e = it.next();
484       if (e.getValue().equals(serverName)) {
485         it.remove();
486       }
487     }
488     long now = System.currentTimeMillis();
489     if (LOG.isDebugEnabled()) {
490       LOG.debug("Adding to processed servers " + serverName);
491     }
492     processedServers.put(serverName, Long.valueOf(now));
493     Configuration conf = server.getConfiguration();
494     long obsoleteTime = conf.getLong(LOG_SPLIT_TIME, DEFAULT_LOG_SPLIT_TIME);
495     // Doesn't have to be very accurate about the clean up time
496     if (now > lastProcessedServerCleanTime + obsoleteTime) {
497       lastProcessedServerCleanTime = now;
498       long cutoff = now - obsoleteTime;
499       for (Iterator<Map.Entry<ServerName, Long>> it
500           = processedServers.entrySet().iterator(); it.hasNext();) {
501         Map.Entry<ServerName, Long> e = it.next();
502         if (e.getValue().longValue() < cutoff) {
503           if (LOG.isDebugEnabled()) {
504             LOG.debug("Removed from processed servers " + e.getKey());
505           }
506           it.remove();
507         }
508       }
509     }
510   }
511 
512   /**
513    * Log split is done for a given region, so it is assignable now.
514    */
515   public void logSplit(final HRegionInfo region) {
516     clearLastAssignment(region);
517   }
518 
519   public synchronized void clearLastAssignment(final HRegionInfo region) {
520     lastAssignments.remove(region.getEncodedName());
521   }
522 
523   /**
524    * A region is offline, won't be in transition any more.
525    */
526   public void regionOffline(final HRegionInfo hri) {
527     regionOffline(hri, null);
528   }
529 
530   /**
531    * A region is offline, won't be in transition any more. Its state
532    * should be the specified expected state, which can only be
533    * Split/Merged/Offline/null(=Offline)/SplittingNew/MergingNew.
534    */
535   public void regionOffline(
536       final HRegionInfo hri, final State expectedState) {
537     Preconditions.checkArgument(expectedState == null
538       || RegionState.isUnassignable(expectedState),
539         "Offlined region should not be " + expectedState);
540     if (isRegionInState(hri, State.SPLITTING_NEW, State.MERGING_NEW)) {
541       // Remove it from all region maps
542       deleteRegion(hri);
543       return;
544     }
545     State newState =
546       expectedState == null ? State.OFFLINE : expectedState;
547     updateRegionState(hri, newState);
548     String encodedName = hri.getEncodedName();
549     synchronized (this) {
550       regionsInTransition.remove(encodedName);
551       ServerName oldServerName = regionAssignments.remove(hri);
552       if (oldServerName != null && serverHoldings.containsKey(oldServerName)) {
553         if (newState == State.MERGED || newState == State.SPLIT
554             || hri.isMetaRegion() || tableStateManager.isTableState(hri.getTable(),
555               TableState.State.DISABLED, TableState.State.DISABLING)) {
556           // Offline the region only if it's merged/split, or the table is disabled/disabling.
557           // Otherwise, offline it from this server only when it is online on a different server.
558           LOG.info("Offlined " + hri.getShortNameToLog() + " from " + oldServerName);
559           removeFromServerHoldings(oldServerName, hri);
560           removeFromReplicaMapping(hri);
561         } else {
562           // Need to remember it so that we can offline it from this
563           // server when it is online on a different server.
564           oldAssignments.put(encodedName, oldServerName);
565         }
566       }
567     }
568   }
569 
570   /**
571    * A server is offline, all regions on it are dead.
572    */
573   public List<HRegionInfo> serverOffline(final ServerName sn) {
574     // Offline all regions on this server not already in transition.
575     List<HRegionInfo> rits = new ArrayList<HRegionInfo>();
576     Set<HRegionInfo> regionsToCleanIfNoMetaEntry = new HashSet<HRegionInfo>();
577     synchronized (this) {
578       Set<HRegionInfo> assignedRegions = serverHoldings.get(sn);
579       if (assignedRegions == null) {
580         assignedRegions = new HashSet<HRegionInfo>();
581       }
582 
583       // Offline regions outside the loop to avoid ConcurrentModificationException
584       Set<HRegionInfo> regionsToOffline = new HashSet<HRegionInfo>();
585       for (HRegionInfo region : assignedRegions) {
586         // Offline open regions, no need to offline if SPLIT/MERGED/OFFLINE
587         if (isRegionOnline(region)) {
588           regionsToOffline.add(region);
589         } else if (isRegionInState(region, State.SPLITTING, State.MERGING)) {
590           LOG.debug("Offline splitting/merging region " + getRegionState(region));
591           regionsToOffline.add(region);
592         }
593       }
594 
595       for (RegionState state : regionsInTransition.values()) {
596         HRegionInfo hri = state.getRegion();
597         if (assignedRegions.contains(hri)) {
598           // Region is open on this region server, but in transition.
599           // This region must be moving away from this server, or splitting/merging.
600           // SSH will handle it, either skip assigning, or re-assign.
601           LOG.info("Transitioning " + state + " will be handled by SSH for " + sn);
602         } else if (sn.equals(state.getServerName())) {
603           // Region is in transition on this region server, and this
604           // region is not open on this server. So the region must be
605           // moving to this server from another one (i.e. opening or
606           // pending open on this server, having been open on another one).
607           // The OFFLINE state is also a kind of pending open if the region is in
608           // transition. The region could be in FAILED_CLOSE state too, if we have
609           // tried several times to open it while this region server was not reachable.
610           if (isOneOfStates(state, State.OPENING, State.PENDING_OPEN,
611               State.FAILED_OPEN, State.FAILED_CLOSE, State.OFFLINE)) {
612             LOG.info("Found region in " + state + " to be reassigned by SSH for " + sn);
613             rits.add(hri);
614           } else if (isOneOfStates(state, State.SPLITTING_NEW)) {
615             regionsToCleanIfNoMetaEntry.add(state.getRegion());
616           } else {
617             LOG.warn("THIS SHOULD NOT HAPPEN: unexpected " + state);
618           }
619         }
620       }
621 
622       for (HRegionInfo hri : regionsToOffline) {
623         regionOffline(hri);
624       }
625 
626       this.notifyAll();
627     }
628     cleanIfNoMetaEntry(regionsToCleanIfNoMetaEntry);
629     return rits;
630   }
631 
632   /**
633    * This method does an RPC to hbase:meta. Do not call this method with a lock/synchronize held.
634    * @param hris The hris to check if empty in hbase:meta and if so, clean them up.
635    */
636   private void cleanIfNoMetaEntry(Set<HRegionInfo> hris) {
637     if (hris.isEmpty()) return;
638     for (HRegionInfo hri: hris) {
639       try {
640         // This is an RPC to the meta table, so it must not be done while holding the
641         // RegionStates lock; no progress will be made if meta is not available at this time.
642         // This is a cleanup task. Not critical.
643         if (MetaTableAccessor.getRegion(server.getConnection(), hri.getEncodedNameAsBytes()) ==
644             null) {
645           regionOffline(hri);
646           FSUtils.deleteRegionDir(server.getConfiguration(), hri);
647         }
648       } catch (IOException e) {
649         LOG.warn("Got exception while deleting " + hri + " directories from file system.", e);
650       }
651     }
652   }
653 
654   /**
655    * Gets the online regions of the specified table.
656    * This method looks at the in-memory state.  It does not go to <code>hbase:meta</code>.
657    * Only returns <em>online</em> regions.  If a region on this table has been
658    * closed during a disable, etc., it will not be included in the returned list.
659    * So, the returned list may not necessarily be ALL regions in this table; it is
660    * all the ONLINE regions in the table.
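      * <p>An illustrative call (the table name is hypothetical):
      * <pre>{@code
      *   List<HRegionInfo> online = regionStates.getRegionsOfTable(TableName.valueOf("t1"));
      * }</pre>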
661    * @param tableName
662    * @return Online regions from <code>tableName</code>
663    */
664   public synchronized List<HRegionInfo> getRegionsOfTable(TableName tableName) {
665     List<HRegionInfo> tableRegions = new ArrayList<HRegionInfo>();
666     // boundary needs to have table's name but regionID 0 so that it is sorted
667     // before all table's regions.
668     HRegionInfo boundary = new HRegionInfo(tableName, null, null, false, 0L);
669     for (HRegionInfo hri: regionAssignments.tailMap(boundary).keySet()) {
670       if(!hri.getTable().equals(tableName)) break;
671       tableRegions.add(hri);
672     }
673     return tableRegions;
674   }
675 
676   /**
677    * Gets current state of all regions of the table.
678    * This method looks at the in-memory state.  It does not go to <code>hbase:meta</code>.
679    * Method guaranteed to return keys for all states
680    * in {@link org.apache.hadoop.hbase.master.RegionState.State}
681    *
682    * @param tableName the table to look up
683    * @return a map from region state to the regions of <code>tableName</code> currently in that state
684    */
685   public synchronized Map<RegionState.State, List<HRegionInfo>>
686   getRegionByStateOfTable(TableName tableName) {
687     Map<RegionState.State, List<HRegionInfo>> tableRegions =
688         new HashMap<State, List<HRegionInfo>>();
689     for (State state : State.values()) {
690       tableRegions.put(state, new ArrayList<HRegionInfo>());
691     }
692     Map<String, RegionState> indexMap = regionStatesTableIndex.get(tableName);
693     if (indexMap == null)
694       return tableRegions;
695     for (RegionState regionState : indexMap.values()) {
696       tableRegions.get(regionState.getState()).add(regionState.getRegion());
697     }
698     return tableRegions;
699   }
700 
701   /**
702    * Wait on region to clear regions-in-transition.
703    * <p>
704    * If the region isn't in transition, returns immediately.  Otherwise, method
705    * blocks until the region is out of transition.
706    */
707   public synchronized void waitOnRegionToClearRegionsInTransition(
708       final HRegionInfo hri) throws InterruptedException {
709     if (!isRegionInTransition(hri)) return;
710 
711     while(!server.isStopped() && isRegionInTransition(hri)) {
712       RegionState rs = getRegionState(hri);
713       LOG.info("Waiting on " + rs + " to clear regions-in-transition");
714       waitForUpdate(100);
715     }
716 
717     if (server.isStopped()) {
718       LOG.info("Giving up wait on region in " +
719         "transition because stoppable.isStopped is set");
720     }
721   }
722 
723   /**
724    * A table is deleted. Remove its regions from all internal maps.
725    * We loop through all regions assuming we don't delete tables too often.
726    */
727   public void tableDeleted(final TableName tableName) {
728     Set<HRegionInfo> regionsToDelete = new HashSet<HRegionInfo>();
729     synchronized (this) {
730       for (RegionState state: regionStates.values()) {
731         HRegionInfo region = state.getRegion();
732         if (region.getTable().equals(tableName)) {
733           regionsToDelete.add(region);
734         }
735       }
736     }
737     for (HRegionInfo region: regionsToDelete) {
738       deleteRegion(region);
739     }
740   }
741 
742   /**
743    * Get a copy of all regions assigned to a server
744    */
745   public synchronized Set<HRegionInfo> getServerRegions(ServerName serverName) {
746     Set<HRegionInfo> regions = serverHoldings.get(serverName);
747     if (regions == null) return null;
748     return new HashSet<HRegionInfo>(regions);
749   }
750 
751   /**
752    * Remove a region from all state maps.
753    */
754   @VisibleForTesting
755   public synchronized void deleteRegion(final HRegionInfo hri) {
756     String encodedName = hri.getEncodedName();
757     regionsInTransition.remove(encodedName);
758     regionStates.remove(encodedName);
759     TableName table = hri.getTable();
760     Map<String, RegionState> indexMap = regionStatesTableIndex.get(table);
761     indexMap.remove(encodedName);
762     if (indexMap.size() == 0)
763       regionStatesTableIndex.remove(table);
764     lastAssignments.remove(encodedName);
765     ServerName sn = regionAssignments.remove(hri);
766     if (sn != null) {
767       Set<HRegionInfo> regions = serverHoldings.get(sn);
768       regions.remove(hri);
769     }
770   }
771 
772   /**
773    * Checks if a region was assigned to a server which is not online now.
774    * If so, we should hold off re-assigning this region till SSH has split its WALs.
775    * Once logs are split, the last assignment of this region will be reset,
776    * which means a null last-assignment server is OK for re-assigning.
777    *
778    * A region server could be dead without us knowing it yet. We may
779    * falsely think it is online. Therefore, even if a server appears online, we still
780    * need to confirm it is reachable and has the expected start code.
781    */
782   synchronized boolean wasRegionOnDeadServer(final String encodedName) {
783     ServerName server = lastAssignments.get(encodedName);
784     return isServerDeadAndNotProcessed(server);
785   }
786 
787   synchronized boolean isServerDeadAndNotProcessed(ServerName server) {
788     if (server == null) return false;
789     if (serverManager.isServerOnline(server)) {
790       String hostAndPort = server.getHostAndPort();
791       long startCode = server.getStartcode();
792       Long deadCode = deadServers.get(hostAndPort);
793       if (deadCode == null || startCode > deadCode.longValue()) {
794         if (serverManager.isServerReachable(server)) {
795           return false;
796         }
797         // The size of deadServers won't grow unbounded.
798         deadServers.put(hostAndPort, Long.valueOf(startCode));
799       }
800       // Watch out! If the server is not dead, the region could
801       // remain unassigned. That's why ServerManager#isServerReachable
802       // should use some retry.
803       //
804       // We cache this info since it is very unlikely for that
805       // instance to come back up later on. We don't want to expire
806       // the server since we prefer to let it die naturally.
807       LOG.warn("Couldn't reach online server " + server);
808     }
809     // Now, we know it's dead. Check if it's processed
810     return !processedServers.containsKey(server);
811   }
812 
813   /**
814    * Get the last region server a region was on, for the purpose of re-assignment:
815    * i.e., should the re-assignment be held back till log splitting is done?
816    */
817   synchronized ServerName getLastRegionServerOfRegion(final String encodedName) {
818     return lastAssignments.get(encodedName);
819   }
820 
821   synchronized void setLastRegionServerOfRegions(
822       final ServerName serverName, final List<HRegionInfo> regionInfos) {
823     for (HRegionInfo hri: regionInfos) {
824       setLastRegionServerOfRegion(serverName, hri.getEncodedName());
825     }
826   }
827 
828   synchronized void setLastRegionServerOfRegion(
829       final ServerName serverName, final String encodedName) {
830     lastAssignments.put(encodedName, serverName);
831   }
832 
833   synchronized boolean isRegionOnServer(
834       final HRegionInfo hri, final ServerName serverName) {
835     Set<HRegionInfo> regions = serverHoldings.get(serverName);
836     return regions == null ? false : regions.contains(hri);
837   }
838 
839   void splitRegion(HRegionInfo p,
840       HRegionInfo a, HRegionInfo b, ServerName sn) throws IOException {
841 
842     regionStateStore.splitRegion(p, a, b, sn, getRegionReplication(p));
843     synchronized (this) {
844       // After the PONR (point of no return), the split is considered to be done.
845       // Update server holdings to be aligned with the meta.
846       Set<HRegionInfo> regions = serverHoldings.get(sn);
847       if (regions == null) {
848         throw new IllegalStateException(sn + " should host some regions");
849       }
850       regions.remove(p);
851       regions.add(a);
852       regions.add(b);
853     }
854   }
855 
856   void mergeRegions(HRegionInfo p,
857       HRegionInfo a, HRegionInfo b, ServerName sn) throws IOException {
858     regionStateStore.mergeRegions(p, a, b, sn, getRegionReplication(a));
859     synchronized (this) {
860       // After the PONR (point of no return), the merge is considered to be done.
861       // Update server holdings to be aligned with the meta.
862       Set<HRegionInfo> regions = serverHoldings.get(sn);
863       if (regions == null) {
864         throw new IllegalStateException(sn + " should host some regions");
865       }
866       regions.remove(a);
867       regions.remove(b);
868       regions.add(p);
869     }
870   }
871 
872   private int getRegionReplication(HRegionInfo r) throws IOException {
873     if (tableStateManager != null) {
874       HTableDescriptor htd = server.getTableDescriptors().get(r.getTable());
875       if (htd != null) {
876         return htd.getRegionReplication();
877       }
878     }
879     return 1;
880   }
881 
882   /**
883    * At a clean cluster start/restart, mark all user regions closed except those of tables
884    * that are excluded, such as disabled/disabling/enabling tables. All user regions
885    * and their previous locations are returned.
886    */
887   synchronized Map<HRegionInfo, ServerName> closeAllUserRegions(Set<TableName> excludedTables) {
888     boolean noExcludeTables = excludedTables == null || excludedTables.isEmpty();
889     Set<HRegionInfo> toBeClosed = new HashSet<HRegionInfo>(regionStates.size());
890     for(RegionState state: regionStates.values()) {
891       HRegionInfo hri = state.getRegion();
892       if (state.isSplit() || hri.isSplit()) {
893         continue;
894       }
895       TableName tableName = hri.getTable();
896       if (!TableName.META_TABLE_NAME.equals(tableName)
897           && (noExcludeTables || !excludedTables.contains(tableName))) {
898         toBeClosed.add(hri);
899       }
900     }
901     Map<HRegionInfo, ServerName> allUserRegions =
902       new HashMap<HRegionInfo, ServerName>(toBeClosed.size());
903     for (HRegionInfo hri: toBeClosed) {
904       RegionState regionState = updateRegionState(hri, State.CLOSED);
905       allUserRegions.put(hri, regionState.getServerName());
906     }
907     return allUserRegions;
908   }
909 
910   /**
911    * Compute the average load across all region servers.
912    * Currently, this uses a very naive computation - just uses the number of
913    * regions being served, ignoring stats about number of requests.
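      * <p>For instance, with two other region servers holding 10 and 20 regions and the
      * master itself holding 2, the raw totals are 32 regions across 3 servers; the
      * master's own share is then excluded, giving (10 + 20) / 2 = 15.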
914    * @return the average load
915    */
916   protected synchronized double getAverageLoad() {
917     int numServers = 0, totalLoad = 0;
918     for (Map.Entry<ServerName, Set<HRegionInfo>> e: serverHoldings.entrySet()) {
919       Set<HRegionInfo> regions = e.getValue();
920       ServerName serverName = e.getKey();
921       int regionCount = regions.size();
922       if (serverManager.isServerOnline(serverName)) {
923         totalLoad += regionCount;
924         numServers++;
925       }
926     }
927     if (numServers > 1) {
928       // The master region server holds only a couple regions.
929       // Don't consider this server in calculating the average load
930       // if there are other region servers to avoid possible confusion.
931       Set<HRegionInfo> hris = serverHoldings.get(server.getServerName());
932       if (hris != null) {
933         totalLoad -= hris.size();
934         numServers--;
935       }
936     }
937     return numServers == 0 ? 0.0 :
938       (double)totalLoad / (double)numServers;
939   }
940 
941   /**
942    * This is an EXPENSIVE clone.  Cloning though is the safest thing to do.
943    * Can't let out the original since it can change and at least the load balancer
944    * wants to iterate this exported list.  We need to synchronize on this instance
945    * since all access to the internal maps is done under its lock.
946    *
947    * @return A clone of current assignments by table.
948    */
949   protected Map<TableName, Map<ServerName, List<HRegionInfo>>>
950       getAssignmentsByTable() {
951     Map<TableName, Map<ServerName, List<HRegionInfo>>> result =
952       new HashMap<TableName, Map<ServerName,List<HRegionInfo>>>();
953     synchronized (this) {
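          // With per-table balancing off (the default), all regions are grouped under a
          // single pseudo-table named "ensemble" so the balancer sees one cluster-wide table.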
954       if (!server.getConfiguration().getBoolean("hbase.master.loadbalance.bytable", false)) {
955         Map<ServerName, List<HRegionInfo>> svrToRegions =
956           new HashMap<ServerName, List<HRegionInfo>>(serverHoldings.size());
957         for (Map.Entry<ServerName, Set<HRegionInfo>> e: serverHoldings.entrySet()) {
958           svrToRegions.put(e.getKey(), new ArrayList<HRegionInfo>(e.getValue()));
959         }
960         result.put(TableName.valueOf("ensemble"), svrToRegions);
961       } else {
962         for (Map.Entry<ServerName, Set<HRegionInfo>> e: serverHoldings.entrySet()) {
963           for (HRegionInfo hri: e.getValue()) {
964             if (hri.isMetaRegion()) continue;
965             TableName tablename = hri.getTable();
966             Map<ServerName, List<HRegionInfo>> svrToRegions = result.get(tablename);
967             if (svrToRegions == null) {
968               svrToRegions = new HashMap<ServerName, List<HRegionInfo>>(serverHoldings.size());
969               result.put(tablename, svrToRegions);
970             }
971             List<HRegionInfo> regions = svrToRegions.get(e.getKey());
972             if (regions == null) {
973               regions = new ArrayList<HRegionInfo>();
974               svrToRegions.put(e.getKey(), regions);
975             }
976             regions.add(hri);
977           }
978         }
979       }
980     }
981 
982     Map<ServerName, ServerLoad>
983       onlineSvrs = serverManager.getOnlineServers();
984     // Take care of servers w/o assignments, and remove servers in draining mode
985     List<ServerName> drainingServers = this.serverManager.getDrainingServersList();
986     for (Map<ServerName, List<HRegionInfo>> map: result.values()) {
987       for (ServerName svr: onlineSvrs.keySet()) {
988         if (!map.containsKey(svr)) {
989           map.put(svr, new ArrayList<HRegionInfo>());
990         }
991       }
992       map.keySet().removeAll(drainingServers);
993     }
994     return result;
995   }
996 
997   protected RegionState getRegionState(final HRegionInfo hri) {
998     return getRegionState(hri.getEncodedName());
999   }
1000 
1001   /**
1002    * Returns a clone of region assignments per server
1003    * @return a Map of ServerName to a List of HRegionInfos
1004    */
1005   protected synchronized Map<ServerName, List<HRegionInfo>> getRegionAssignmentsByServer() {
1006     Map<ServerName, List<HRegionInfo>> regionsByServer =
1007         new HashMap<ServerName, List<HRegionInfo>>(serverHoldings.size());
1008     for (Map.Entry<ServerName, Set<HRegionInfo>> e: serverHoldings.entrySet()) {
1009       regionsByServer.put(e.getKey(), new ArrayList<HRegionInfo>(e.getValue()));
1010     }
1011     return regionsByServer;
1012   }
1013 
1014   protected synchronized RegionState getRegionState(final String encodedName) {
1015     return regionStates.get(encodedName);
1016   }
1017 
1018   /**
1019    * Get the HRegionInfo from the cache or, if not there, from the hbase:meta table.
1020    * Be careful. Does RPC. Do not hold a lock or synchronize when you call this method.
1021    * @param  regionName
1022    * @return HRegionInfo for the region
1023    */
1024   @SuppressWarnings("deprecation")
1025   protected HRegionInfo getRegionInfo(final byte [] regionName) {
1026     String encodedName = HRegionInfo.encodeRegionName(regionName);
1027     RegionState regionState = getRegionState(encodedName);
1028     if (regionState != null) {
1029       return regionState.getRegion();
1030     }
1031 
1032     try {
1033       Pair<HRegionInfo, ServerName> p =
1034         MetaTableAccessor.getRegion(server.getConnection(), regionName);
1035       HRegionInfo hri = p == null ? null : p.getFirst();
1036       if (hri != null) {
1037         createRegionState(hri);
1038       }
1039       return hri;
1040     } catch (IOException e) {
1041       server.abort("Aborting because an error occurred while reading "
1042         + Bytes.toStringBinary(regionName) + " from hbase:meta", e);
1043       return null;
1044     }
1045   }
1046 
1047   static boolean isOneOfStates(RegionState regionState, State... states) {
1048     State s = regionState != null ? regionState.getState() : null;
1049     for (State state: states) {
1050       if (s == state) return true;
1051     }
1052     return false;
1053   }
1054 
1055   /**
1056    * Update a region state. It will be put in transition if not already there.
1057    */
1058   private RegionState updateRegionState(final HRegionInfo hri,
1059       final RegionState.State state, final ServerName serverName, long openSeqNum) {
1060     if (state == RegionState.State.FAILED_CLOSE || state == RegionState.State.FAILED_OPEN) {
1061       LOG.warn("Failed to open/close " + hri.getShortNameToLog()
1062         + " on " + serverName + ", set to " + state);
1063     }
1064 
1065     String encodedName = hri.getEncodedName();
1066     RegionState regionState = new RegionState(
1067       hri, state, System.currentTimeMillis(), serverName);
1068     RegionState oldState = getRegionState(encodedName);
1069     if (!regionState.equals(oldState)) {
1070       LOG.info("Transition " + oldState + " to " + regionState);
1071       // Persist region state before updating in-memory info, if needed
1072       regionStateStore.updateRegionState(openSeqNum, regionState, oldState);
1073     }
1074 
1075     synchronized (this) {
1076       regionsInTransition.put(encodedName, regionState);
1077       putRegionState(regionState);
1078 
1079       // For these states, region should be properly closed.
1080       // There should be no log splitting issue.
1081       if ((state == State.CLOSED || state == State.MERGED
1082           || state == State.SPLIT) && lastAssignments.containsKey(encodedName)) {
1083         ServerName last = lastAssignments.get(encodedName);
1084         if (last.equals(serverName)) {
1085           lastAssignments.remove(encodedName);
1086         } else {
1087           LOG.warn(encodedName + " moved to " + state + " on "
1088             + serverName + ", expected " + last);
1089         }
1090       }
1091 
1092       // Once a region is opened, record its last assignment right away.
1093       if (serverName != null && state == State.OPEN) {
1094         ServerName last = lastAssignments.get(encodedName);
1095         if (!serverName.equals(last)) {
1096           lastAssignments.put(encodedName, serverName);
1097           if (last != null && isServerDeadAndNotProcessed(last)) {
1098             LOG.warn(encodedName + " moved to " + serverName
1099               + ", while it's previous host " + last
1100               + " is dead but not processed yet");
1101           }
1102         }
1103       }
1104 
1105       // notify the change
1106       this.notifyAll();
1107     }
1108     return regionState;
1109   }
1110 }