1   /**
2    * Copyright 2010 The Apache Software Foundation
3    *
4    * Licensed to the Apache Software Foundation (ASF) under one
5    * or more contributor license agreements.  See the NOTICE file
6    * distributed with this work for additional information
7    * regarding copyright ownership.  The ASF licenses this file
8    * to you under the Apache License, Version 2.0 (the
9    * "License"); you may not use this file except in compliance
10   * with the License.  You may obtain a copy of the License at
11   *
12   *     http://www.apache.org/licenses/LICENSE-2.0
13   *
14   * Unless required by applicable law or agreed to in writing, software
15   * distributed under the License is distributed on an "AS IS" BASIS,
16   * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
17   * See the License for the specific language governing permissions and
18   * limitations under the License.
19   */
20  package org.apache.hadoop.hbase.client;
21  
22  import java.io.Closeable;
23  import java.io.IOException;
24  import java.lang.reflect.Proxy;
25  import java.lang.reflect.UndeclaredThrowableException;
26  import java.net.InetSocketAddress;
27  import java.util.ArrayList;
28  import java.util.Collections;
29  import java.util.HashMap;
30  import java.util.HashSet;
31  import java.util.LinkedHashMap;
32  import java.util.List;
33  import java.util.Map;
34  import java.util.Map.Entry;
35  import java.util.NavigableMap;
36  import java.util.Set;
37  import java.util.TreeMap;
38  import java.util.concurrent.Callable;
39  import java.util.concurrent.ConcurrentHashMap;
40  import java.util.concurrent.CopyOnWriteArraySet;
41  import java.util.concurrent.ExecutionException;
42  import java.util.concurrent.ExecutorService;
43  import java.util.concurrent.Future;
44  import java.util.concurrent.SynchronousQueue;
45  import java.util.concurrent.ThreadPoolExecutor;
46  import java.util.concurrent.TimeUnit;
47  import java.util.concurrent.atomic.AtomicBoolean;
48  import java.util.concurrent.atomic.AtomicInteger;
49  
50  import org.apache.commons.logging.Log;
51  import org.apache.commons.logging.LogFactory;
52  import org.apache.hadoop.conf.Configuration;
53  import org.apache.hadoop.hbase.DoNotRetryIOException;
54  import org.apache.hadoop.hbase.HBaseConfiguration;
55  import org.apache.hadoop.hbase.HConstants;
56  import org.apache.hadoop.hbase.HRegionInfo;
57  import org.apache.hadoop.hbase.HRegionLocation;
58  import org.apache.hadoop.hbase.HServerAddress;
59  import org.apache.hadoop.hbase.HTableDescriptor;
60  import org.apache.hadoop.hbase.KeyValue;
61  import org.apache.hadoop.hbase.MasterAddressTracker;
62  import org.apache.hadoop.hbase.MasterNotRunningException;
63  import org.apache.hadoop.hbase.RemoteExceptionHandler;
64  import org.apache.hadoop.hbase.ServerName;
65  import org.apache.hadoop.hbase.TableNotFoundException;
66  import org.apache.hadoop.hbase.ZooKeeperConnectionException;
67  import org.apache.hadoop.hbase.client.MetaScanner.MetaScannerVisitor;
68  import org.apache.hadoop.hbase.client.MetaScanner.MetaScannerVisitorBase;
69  import org.apache.hadoop.hbase.client.coprocessor.Batch;
70  import org.apache.hadoop.hbase.ipc.CoprocessorProtocol;
71  import org.apache.hadoop.hbase.ipc.ExecRPCInvoker;
72  import org.apache.hadoop.hbase.ipc.HBaseRPC;
73  import org.apache.hadoop.hbase.ipc.HMasterInterface;
74  import org.apache.hadoop.hbase.ipc.HRegionInterface;
75  import org.apache.hadoop.hbase.ipc.RpcEngine;
import org.apache.hadoop.hbase.security.UserProvider;
76  import org.apache.hadoop.hbase.util.Addressing;
77  import org.apache.hadoop.hbase.util.Bytes;
78  import org.apache.hadoop.hbase.util.HashedBytes;
79  import org.apache.hadoop.hbase.util.Pair;
80  import org.apache.hadoop.hbase.util.SoftValueSortedMap;
81  import org.apache.hadoop.hbase.util.Threads;
82  import org.apache.hadoop.hbase.util.Writables;
83  import org.apache.hadoop.hbase.zookeeper.ClusterId;
84  import org.apache.hadoop.hbase.zookeeper.RootRegionTracker;
85  import org.apache.hadoop.hbase.zookeeper.ZKTableReadOnly;
86  import org.apache.hadoop.hbase.zookeeper.ZKUtil;
87  import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
88  import org.apache.hadoop.ipc.RemoteException;
89  import org.apache.zookeeper.KeeperException;
90  
91  /**
92   * A non-instantiable class that manages creation of {@link HConnection}s.
93   * <p>The simplest way to use this class is by using {@link #createConnection(Configuration)}.
94   * This creates a new {@link HConnection} that is managed by the caller.
95   * From this {@link HConnection} {@link HTableInterface} implementations are retrieved 
96   * with {@link HConnection#getTable(byte[])}. Example:
97   * <pre>
98   * {@code
99   * HConnection connection = HConnectionManager.createConnection(config);
100  * HTableInterface table = connection.getTable("table1");
101  * // use the table as needed, for a single operation and a single thread
102  * table.close();
103  * connection.close();
104  * }
105  * </pre>
106  * <p>The following logic and API will be removed in the future:
107  * <p>This class has a static Map of {@link HConnection} instances keyed by
108  * {@link Configuration}; all invocations of {@link #getConnection(Configuration)}
109  * that pass the same {@link Configuration} instance will return the same
110  * {@link HConnection} instance (adding properties to a Configuration
111  * instance does not change its object identity).  Sharing {@link HConnection}
112  * instances is usually what you want; all clients of the {@link HConnection}
113  * instances share the HConnection's cache of Region locations rather than each
114  * having to discover for itself the location of meta, root, etc.  This makes
115  * sense for the likes of the HTable pool class {@link HTablePool}, for
116  * instance.  (If you are concerned that a single {@link HConnection} is
117  * insufficient for sharing amongst clients in, say, a heavily multithreaded
118  * environment, in practice this has not proven to be an issue.  Besides,
119  * {@link HConnection} is implemented atop Hadoop RPC and, as of this writing,
120  * Hadoop RPC uses a single connection per cluster member.)
121  *
122  * <p>But sharing connections
123  * makes cleanup of {@link HConnection} instances a little awkward.  Currently,
124  * clients clean up by calling
125  * {@link #deleteConnection(Configuration)}.  This will shut down the
126  * zookeeper connection the HConnection was using and release all other
127  * HConnection resources, as well as stopping proxies to servers out on the
128  * cluster.  Not running the cleanup will not end the world; it will
129  * just slow shutdown somewhat and spew some zookeeper connection failed
130  * messages into the log.  Running the cleanup on a {@link HConnection} that is
131  * subsequently used by another client will cause breakage, so be careful when
132  * running cleanup.
133  * <p>To create a {@link HConnection} that is not shared by others, you can
134  * create a new {@link Configuration} instance, pass this new instance to
135  * {@link #getConnection(Configuration)}, and then when done, close it up by
136  * doing something like the following:
137  * <pre>
138  * {@code
139  * Configuration newConfig = new Configuration(originalConf);
140  * HConnection connection = HConnectionManager.getConnection(newConfig);
141  * // Use the connection to your hearts' delight and then when done...
142  * HConnectionManager.deleteConnection(newConfig, true);
143  * }
144  * </pre>
145  * <p>Cleanup used to be done inside a shutdown hook.  On startup we'd
146  * register a shutdown hook that called {@link #deleteAllConnections()}
147  * on its way out, but the order in which shutdown hooks run is not defined,
148  * which was problematic for clients of HConnection that wanted to register
149  * their own shutdown hooks.  We therefore removed ours, though this shifts
150  * the onus for cleanup to the client.
151  */
152 @SuppressWarnings("serial")
153 public class HConnectionManager {
154   // An LRU Map of HConnectionKey -> HConnection (TableServer).  All
155   // access must be synchronized.  This map is not private because tests
156   // need to be able to tinker with it.
157   static final Map<HConnectionKey, HConnectionImplementation> HBASE_INSTANCES;
158 
159   public static final int MAX_CACHED_HBASE_INSTANCES;
160 
161   private static Log LOG = LogFactory.getLog(HConnectionManager.class);
162 
163   static {
164     // We set instances to one more than the value specified for {@link
165     // HConstants#ZOOKEEPER_MAX_CLIENT_CNXNS}. By default, the zk max number of
166     // connections to the ensemble from one client is 30, so in that case we
167     // should run into zk issues before the LRU hits this value of 31.
168     MAX_CACHED_HBASE_INSTANCES = HBaseConfiguration.create().getInt(
169         HConstants.ZOOKEEPER_MAX_CLIENT_CNXNS,
170         HConstants.DEFAULT_ZOOKEPER_MAX_CLIENT_CNXNS) + 1;
171     HBASE_INSTANCES = new LinkedHashMap<HConnectionKey, HConnectionImplementation>(
172         (int) (MAX_CACHED_HBASE_INSTANCES / 0.75F) + 1, 0.75F, true) {
173       @Override
174       protected boolean removeEldestEntry(
175           Map.Entry<HConnectionKey, HConnectionImplementation> eldest) {
176         return size() > MAX_CACHED_HBASE_INSTANCES;
177       }
178     };
179   }
180 
181   /*
182    * Non-instantiable.
183    */
184   protected HConnectionManager() {
185     super();
186   }
187 
188   /**
189    * Get the connection that goes with the passed <code>conf</code>
190    * configuration instance.
191    * If no current connection exists, method creates a new connection for the
192    * passed <code>conf</code> instance.
193    * @param conf configuration
194    * @return HConnection object for <code>conf</code>
195    * @throws ZooKeeperConnectionException
196    */
197   public static HConnection getConnection(Configuration conf)
198   throws ZooKeeperConnectionException {
199     HConnectionKey connectionKey = new HConnectionKey(conf);
200     synchronized (HBASE_INSTANCES) {
201       HConnectionImplementation connection = HBASE_INSTANCES.get(connectionKey);
202       if (connection == null) {
203         connection = new HConnectionImplementation(conf, true, null);
204         HBASE_INSTANCES.put(connectionKey, connection);
205       } else if (connection.isClosed()) {
206         HConnectionManager.deleteConnection(connectionKey, true);
207         connection = new HConnectionImplementation(conf, true, null);
208         HBASE_INSTANCES.put(connectionKey, connection);
209       }
210       connection.incCount();
211       return connection;
212     }
213   }
214 
215   /**
216    * Create a new HConnection instance using the passed <code>conf</code>
217    * instance.
218    * Note: This bypasses the usual HConnection life cycle management.
219    * The caller is responsible for calling {@link HConnection#close()}
220    * on the returned connection instance.
221    *
222    * This is the recommended way to create HConnections.
223    * <pre>{@code
224    * HConnection connection = HConnectionManager.createConnection(conf);
225    * HTableInterface table = connection.getTable("mytable");
226    * table.get(...);
227    * ...
228    * table.close();
229    * connection.close();
230    * }</pre>
231    * @param conf configuration
232    * @return HConnection object for <code>conf</code>
233    * @throws ZooKeeperConnectionException
234    */
235   public static HConnection createConnection(Configuration conf)
236   throws ZooKeeperConnectionException {
237     return new HConnectionImplementation(conf, false, null);
238   }
239 
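  /**
   * Create a new HConnection instance using the passed <code>conf</code>
   * instance and the passed <code>pool</code> as the thread executor shared
   * by HTableInterface instances created from this connection.
   * As with {@link #createConnection(Configuration)}, the caller is
   * responsible for calling {@link HConnection#close()} on the returned
   * connection instance.
   * @param conf configuration
   * @param pool thread executor used for batch operations on the returned connection
   * @return HConnection object for <code>conf</code>
   * @throws IOException
   */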
240   public static HConnection createConnection(Configuration conf, ExecutorService pool)
241       throws IOException {
242     return new HConnectionImplementation(conf, false, pool);
243   }
244 
245   /**
246    * Delete connection information for the instance specified by configuration.
247    * If there are no more references to it, this will then close connection to
248    * the zookeeper ensemble and let go of all resources.
249    *
250    * @param conf
251    *          configuration whose identity is used to find {@link HConnection}
252    *          instance.
253    * @param stopProxy
254    *          No longer used.  This parameter is ignored.
255    * @deprecated use {@link #createConnection(org.apache.hadoop.conf.Configuration)} instead
256    */
257   @Deprecated
258   public static void deleteConnection(Configuration conf, boolean stopProxy) {
259     deleteConnection(conf);
260   }
261 
262   /**
263    * Delete connection information for the instance specified by configuration.
264    * If there are no more references to it, this will then close connection to
265    * the zookeeper ensemble and let go of all resources.
266    *
267    * @param conf
268    *          configuration whose identity is used to find {@link HConnection}
269    *          instance.
270    */
271   @Deprecated
272   public static void deleteConnection(Configuration conf) {
273     deleteConnection(new HConnectionKey(conf), false);
274   }
275 
276   /**
277    * Delete stale connection information for the passed connection instance.
278    * This will then close the connection to
279    * the zookeeper ensemble and let go of all resources.
280    *
281    * @param connection stale connection to be deleted
282    */
283   public static void deleteStaleConnection(HConnection connection) {
284     deleteConnection(connection, true);
285   }
286 
287   /**
288    * Delete information for all connections.
289    * @param stopProxy No longer used.  This parameter is ignored.
290    * @deprecated use {@link #deleteAllConnections()} instead
291    */
292   @Deprecated
293   public static void deleteAllConnections(boolean stopProxy) {
294     deleteAllConnections();
295   }
296 
297   /**
298    * Delete information for all connections.
299    * @throws IOException
300    */
301   @Deprecated
302   public static void deleteAllConnections() {
303     synchronized (HBASE_INSTANCES) {
304       Set<HConnectionKey> connectionKeys = new HashSet<HConnectionKey>();
305       connectionKeys.addAll(HBASE_INSTANCES.keySet());
306       for (HConnectionKey connectionKey : connectionKeys) {
307         deleteConnection(connectionKey, false);
308       }
309       HBASE_INSTANCES.clear();
310     }
311   }
312 
313   @Deprecated
314   private static void deleteConnection(HConnection connection, boolean staleConnection) {
315     synchronized (HBASE_INSTANCES) {
316       for (Entry<HConnectionKey, HConnectionImplementation> connectionEntry : HBASE_INSTANCES
317           .entrySet()) {
318         if (connectionEntry.getValue() == connection) {
319           deleteConnection(connectionEntry.getKey(), staleConnection);
320           break;
321         }
322       }
323     }
324   }
325 
326   @Deprecated
327   private static void deleteConnection(HConnectionKey connectionKey,
328       boolean staleConnection) {
329     synchronized (HBASE_INSTANCES) {
330       HConnectionImplementation connection = HBASE_INSTANCES
331           .get(connectionKey);
332       if (connection != null) {
333         connection.decCount();
334         if (connection.isZeroReference() || staleConnection) {
335           HBASE_INSTANCES.remove(connectionKey);
336           connection.internalClose();
337         }
338       } else {
339         LOG.error("Connection not found in the list, can't delete it " +
340           "(connection key=" + connectionKey + "). Maybe the key was modified?");
341       }
342     }
343   }
344 
345   /**
346    * It is provided for unit test cases which verify the behavior of region
347    * location cache prefetch.
348    * @return Number of cached regions for the table.
349    * @throws IOException
350    */
351   static int getCachedRegionCount(Configuration conf,
352       final byte[] tableName)
353   throws IOException {
354     return execute(new HConnectable<Integer>(conf) {
355       @Override
356       public Integer connect(HConnection connection) {
357         return ((HConnectionImplementation) connection)
358             .getNumberOfCachedRegionLocations(tableName);
359       }
360     });
361   }
362 
363   /**
364    * It's provided for unit test cases which verify the behavior of region
365    * location cache prefetch.
366    * @return true if the region where the table and row reside is cached.
367    * @throws IOException
368    */
369   static boolean isRegionCached(Configuration conf,
370       final byte[] tableName, final byte[] row) throws IOException {
371     return execute(new HConnectable<Boolean>(conf) {
372       @Override
373       public Boolean connect(HConnection connection) {
374         return ((HConnectionImplementation) connection).isRegionCached(tableName, row);
375       }
376     });
377   }
378 
379   /**
380    * This class makes it convenient for one to execute a command in the context
381    * of a {@link HConnection} instance based on the given {@link Configuration}.
382    *
383    * <p>
384    * If you find yourself wanting to use a {@link HConnection} for a relatively
385    * short duration of time, and do not want to deal with the hassle of creating
386    * and cleaning up that resource, then you should consider using this
387    * convenience class.
388    *
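   * <p>A minimal usage sketch, given a {@link Configuration} {@code conf} and
   * assuming a table named {@code "t1"} exists, that asks a short-lived
   * connection whether the table is enabled:
   * <pre>{@code
   * boolean enabled = HConnectionManager.execute(new HConnectable<Boolean>(conf) {
   *   public Boolean connect(HConnection connection) throws IOException {
   *     return connection.isTableEnabled(Bytes.toBytes("t1"));
   *   }
   * });
   * }</pre>
   *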
389    * @param <T>
390    *          the return type of the {@link HConnectable#connect(HConnection)}
391    *          method.
392    */
393   public static abstract class HConnectable<T> {
394     public Configuration conf;
395 
396     public HConnectable(Configuration conf) {
397       this.conf = conf;
398     }
399 
400     public abstract T connect(HConnection connection) throws IOException;
401   }
402 
403   /**
404    * This convenience method invokes the given {@link HConnectable#connect}
405    * implementation using a {@link HConnection} instance that lasts just for the
406    * duration of that invocation.
407    *
408    * @param <T> the return type of the connect method
409    * @param connectable the {@link HConnectable} instance
410    * @return the value returned by the connect method
411    * @throws IOException
412    */
413   public static <T> T execute(HConnectable<T> connectable) throws IOException {
414     if (connectable == null || connectable.conf == null) {
415       return null;
416     }
417     Configuration conf = connectable.conf;
418     HConnection connection = HConnectionManager.getConnection(conf);
419     boolean connectSucceeded = false;
420     try {
421       T returnValue = connectable.connect(connection);
422       connectSucceeded = true;
423       return returnValue;
424     } finally {
425       try {
426         connection.close();
427       } catch (Exception e) {
428         if (connectSucceeded) {
429           throw new IOException("The connection to " + connection
430               + " could not be deleted.", e);
431         }
432       }
433     }
434   }
435 
436   /**
437    * Denotes a unique key to a {@link HConnection} instance.
438    *
439    * In essence, this class captures the properties in {@link Configuration}
440    * that may be used in the process of establishing a connection. In light of
441    * that, if any new such properties are introduced into the mix, they must be
442    * added to the {@link HConnectionKey#CONNECTION_PROPERTIES} list.
443    *
444    */
445   public static class HConnectionKey {
446     public static String[] CONNECTION_PROPERTIES = new String[] {
447         HConstants.ZOOKEEPER_QUORUM, HConstants.ZOOKEEPER_ZNODE_PARENT,
448         HConstants.ZOOKEEPER_CLIENT_PORT,
449         HConstants.ZOOKEEPER_RECOVERABLE_WAITTIME,
450         HConstants.HBASE_CLIENT_PAUSE, HConstants.HBASE_CLIENT_RETRIES_NUMBER,
451         HConstants.HBASE_CLIENT_RPC_MAXATTEMPTS,
452         HConstants.HBASE_RPC_TIMEOUT_KEY,
453         HConstants.HBASE_CLIENT_PREFETCH_LIMIT,
454         HConstants.HBASE_META_SCANNER_CACHING,
455         HConstants.HBASE_CLIENT_INSTANCE_ID };
456 
457     private Map<String, String> properties;
458     private String username;
459 
460     public HConnectionKey(Configuration conf) {
461       Map<String, String> m = new HashMap<String, String>();
462       if (conf != null) {
463         for (String property : CONNECTION_PROPERTIES) {
464           String value = conf.get(property);
465           if (value != null) {
466             m.put(property, value);
467           }
468         }
469       }
470       this.properties = Collections.unmodifiableMap(m);
471 
472       try {
473         UserProvider provider = UserProvider.instantiate(conf);
474         username = provider.getCurrentUserName();
475       } catch (IOException ioe) {
476         LOG.warn("Error obtaining current user, skipping username in HConnectionKey",
477             ioe);
478       }
479     }
480 
481     @Override
482     public int hashCode() {
483       final int prime = 31;
484       int result = 1;
485       if (username != null) {
486         result = username.hashCode();
487       }
488       for (String property : CONNECTION_PROPERTIES) {
489         String value = properties.get(property);
490         if (value != null) {
491           result = prime * result + value.hashCode();
492         }
493       }
494 
495       return result;
496     }
497 
498     @Override
499     public boolean equals(Object obj) {
500       if (this == obj)
501         return true;
502       if (obj == null)
503         return false;
504       if (getClass() != obj.getClass())
505         return false;
506       HConnectionKey that = (HConnectionKey) obj;
507       if (this.username != null && !this.username.equals(that.username)) {
508         return false;
509       } else if (this.username == null && that.username != null) {
510         return false;
511       }
512       if (this.properties == null) {
513         if (that.properties != null) {
514           return false;
515         }
516       } else {
517         if (that.properties == null) {
518           return false;
519         }
520         for (String property : CONNECTION_PROPERTIES) {
521           String thisValue = this.properties.get(property);
522           String thatValue = that.properties.get(property);
523           if (thisValue == thatValue) {
524             continue;
525           }
526           if (thisValue == null || !thisValue.equals(thatValue)) {
527             return false;
528           }
529         }
530       }
531       return true;
532     }
533 
534     @Override
535     public String toString() {
536       return "HConnectionKey{" +
537         "properties=" + properties +
538         ", username='" + username + '\'' +
539         '}';
540     }
541   }
542 
543   /* Encapsulates connection to zookeeper and regionservers.*/
544   static class HConnectionImplementation implements HConnection, Closeable {
545     static final Log LOG = LogFactory.getLog(HConnectionImplementation.class);
546     private final Class<? extends HRegionInterface> serverInterfaceClass;
547     private final long pause;
548     private final int numRetries;
549     private final int maxRPCAttempts;
550     private final int rpcTimeout;
551     private final int prefetchRegionLimit;
552 
553     private final Object masterLock = new Object();
554     private volatile boolean closed;
555     private volatile boolean aborted;
556     private volatile boolean resetting;
557     private volatile HMasterInterface master;
558     // ZooKeeper reference
559     private volatile ZooKeeperWatcher zooKeeper;
560     // ZooKeeper-based master address tracker
561     private volatile MasterAddressTracker masterAddressTracker;
562     private volatile RootRegionTracker rootRegionTracker;
563     private volatile ClusterId clusterId;
564 
565     private final Object metaRegionLock = new Object();
566 
567     private final Object userRegionLock = new Object();
568 	
569     private final Object resetLock = new Object();
570 
571     // thread executor shared by all HTableInterface instances created
572     // by this connection
573     private volatile ExecutorService batchPool = null;
574     private volatile boolean cleanupPool = false;
575 
576     private final Configuration conf;
577 
578     private RpcEngine rpcEngine;
579 
580     // Known region HServerAddress.toString() -> HRegionInterface
581 
582     private final Map<String, HRegionInterface> servers =
583       new ConcurrentHashMap<String, HRegionInterface>();
584     private final ConcurrentHashMap<String, String> connectionLock =
585       new ConcurrentHashMap<String, String>();
586 
587     /**
588      * Map of table to table {@link HRegionLocation}s.
589      */
590     private final Map<HashedBytes, SoftValueSortedMap<byte [], HRegionLocation>>
591       cachedRegionLocations =
592         new HashMap<HashedBytes, SoftValueSortedMap<byte [], HRegionLocation>>();
593 
594     // The presence of a server in the map implies it's likely that there is an
595     // entry in cachedRegionLocations that maps to this server; but the absence
596     // of a server in this map guarantees that there is no entry in the cache that
597     // maps to the absent server.
598     private final Set<String> cachedServers =
599         new HashSet<String>();
600 
601     // region cache prefetch is enabled by default. this set contains all
602     // tables whose region cache prefetch is disabled.
603     private final Set<HashedBytes> regionCachePrefetchDisabledTables =
604       new CopyOnWriteArraySet<HashedBytes>();
605 
606     private int refCount;
607 
608     // indicates whether this connection's life cycle is managed
609     private final boolean managed;
610     /**
611      * constructor
612      * @param conf Configuration object
613      */
614     @SuppressWarnings("unchecked")
615     public HConnectionImplementation(Configuration conf, boolean managed, ExecutorService pool)
616     throws ZooKeeperConnectionException {
617       this.conf = conf;
618       this.batchPool = pool;
619       this.managed = managed;
620       String serverClassName = conf.get(HConstants.REGION_SERVER_CLASS,
621         HConstants.DEFAULT_REGION_SERVER_CLASS);
622       this.closed = false;
623       try {
624         this.serverInterfaceClass =
625           (Class<? extends HRegionInterface>) Class.forName(serverClassName);
626       } catch (ClassNotFoundException e) {
627         throw new UnsupportedOperationException(
628             "Unable to find region server interface " + serverClassName, e);
629       }
630       this.pause = conf.getLong(HConstants.HBASE_CLIENT_PAUSE,
631           HConstants.DEFAULT_HBASE_CLIENT_PAUSE);
632       this.numRetries = conf.getInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER,
633           HConstants.DEFAULT_HBASE_CLIENT_RETRIES_NUMBER);
634       this.maxRPCAttempts = conf.getInt(
635           HConstants.HBASE_CLIENT_RPC_MAXATTEMPTS,
636           HConstants.DEFAULT_HBASE_CLIENT_RPC_MAXATTEMPTS);
637       this.rpcTimeout = conf.getInt(
638           HConstants.HBASE_RPC_TIMEOUT_KEY,
639           HConstants.DEFAULT_HBASE_RPC_TIMEOUT);
640       this.prefetchRegionLimit = conf.getInt(
641           HConstants.HBASE_CLIENT_PREFETCH_LIMIT,
642           HConstants.DEFAULT_HBASE_CLIENT_PREFETCH_LIMIT);
643 
644       this.master = null;
645       this.resetting = false;
646     }
647 
648     @Override
649     public HTableInterface getTable(String tableName) throws IOException {
650       return getTable(Bytes.toBytes(tableName));
651     }
652 
653     @Override
654     public HTableInterface getTable(byte[] tableName) throws IOException {
655       return getTable(tableName, getBatchPool());
656     }
657 
658     @Override
659     public HTableInterface getTable(String tableName, ExecutorService pool) throws IOException {
660       return getTable(Bytes.toBytes(tableName), pool);
661     }
662 
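    // Only unmanaged connections (created via HConnectionManager.createConnection)
    // hand out HTable instances here; a managed connection obtained through
    // getConnection(Configuration) refuses with an IOException.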
663     @Override
664     public HTableInterface getTable(byte[] tableName, ExecutorService pool) throws IOException {
665       if (managed) {
666         throw new IOException("The connection has to be unmanaged.");
667       }
668       return new HTable(tableName, this, pool);
669     }
670 
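    // Lazily creates the executor shared by HTableInterface instances vended by
    // this connection.  Core size is the number of available processors; the
    // upper bound comes from "hbase.hconnection.threads.max" (0 also means one
    // thread per processor).  Threads are daemons that time out after the
    // configured keep-alive, so an idle connection holds no threads.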
671     private ExecutorService getBatchPool() {
672       if (batchPool == null) {
673         // shared HTable thread executor not yet initialized
674         synchronized (this) {
675           if (batchPool == null) {
676             int maxThreads = conf.getInt("hbase.hconnection.threads.max", Integer.MAX_VALUE);
677             if (maxThreads == 0) {
678               maxThreads = Runtime.getRuntime().availableProcessors();
679             }
680             long keepAliveTime = conf.getLong("hbase.hconnection.threads.keepalivetime", 60);
681             this.batchPool = new ThreadPoolExecutor(Runtime.getRuntime().availableProcessors(),
682                 maxThreads, keepAliveTime, TimeUnit.SECONDS, new SynchronousQueue<Runnable>(),
683                 Threads.newDaemonThreadFactory("hbase-connection-shared-executor"));
684             ((ThreadPoolExecutor) this.batchPool).allowCoreThreadTimeOut(true);
685           }
686           this.cleanupPool = true;
687         }
688       }
689       return this.batchPool;
690     }
691 
692     protected ExecutorService getCurrentBatchPool() {
693       return batchPool;
694     }
695 
696     private void shutdownBatchPool() {
697       if (this.cleanupPool && this.batchPool != null && !this.batchPool.isShutdown()) {
698         this.batchPool.shutdown();
699         try {
700           if (!this.batchPool.awaitTermination(10, TimeUnit.SECONDS)) {
701             this.batchPool.shutdownNow();
702           }
703         } catch (InterruptedException e) {
704           this.batchPool.shutdownNow();
705         }
706       }
707     }
708 
709     private synchronized void ensureZookeeperTrackers()
710         throws ZooKeeperConnectionException {
711       // initialize zookeeper and master address manager
712       if (zooKeeper == null) {
713         zooKeeper = getZooKeeperWatcher();
714       }
715       if (clusterId == null) {
716         clusterId = new ClusterId(zooKeeper, this);
717         if (clusterId.hasId()) {
718           conf.set(HConstants.CLUSTER_ID, clusterId.getId());
719         }
720       }
721       if (masterAddressTracker == null) {
722         masterAddressTracker = new MasterAddressTracker(zooKeeper, this);
723         masterAddressTracker.start();
724       }
725       if (rootRegionTracker == null) {
726         rootRegionTracker = new RootRegionTracker(zooKeeper, this);
727         rootRegionTracker.start();
728       }
729       // RpcEngine needs access to zookeeper data, like cluster ID
730       if (rpcEngine == null) {
731         this.rpcEngine = HBaseRPC.getProtocolEngine(conf);
732       }
733     }
734 
735     private synchronized void resetZooKeeperTrackers() {
736       if (masterAddressTracker != null) {
737         masterAddressTracker.stop();
738         masterAddressTracker = null;
739       }
740       if (rootRegionTracker != null) {
741         rootRegionTracker.stop();
742         rootRegionTracker = null;
743       }
744       clusterId = null;
745       if (zooKeeper != null) {
746         zooKeeper.close();
747         zooKeeper = null;
748       }
749     }
750 
751     public Configuration getConfiguration() {
752       return this.conf;
753     }
754 
755     /**
756      * Log failure of getMaster attempt
757      * @return true if should retry
758      */
759     private boolean shouldRetryGetMaster(int tries, Exception e) {
760       if (tries == numRetries - 1) {
761         // This was our last chance - don't bother sleeping
762         LOG.info("getMaster attempt " + tries + " of " + numRetries +
763           " failed; no more retrying.", e);
764         return false;
765       }
766       LOG.info("getMaster attempt " + tries + " of " + numRetries +
767         " failed; retrying after sleep of " +
768         ConnectionUtils.getPauseTime(this.pause, tries), e);
769       return true;
770     }
771 
772     public HMasterInterface getMaster()
773     throws MasterNotRunningException, ZooKeeperConnectionException {
774       // TODO: REMOVE.  MOVE TO HBaseAdmin and redo as a Callable!!!
775 
776       // Check if we already have a good master connection
777       try {
778         if (master != null && master.isMasterRunning()) {
779           return master;
780         }
781       } catch (UndeclaredThrowableException ute) {
782         // log, but ignore, the loop below will attempt to reconnect
783         LOG.info("Exception contacting master. Retrying...", ute.getCause());
784       }
785 
786       ensureZookeeperTrackers();
787       checkIfBaseNodeAvailable();
788       ServerName sn = null;
789       synchronized (this.masterLock) {
790         try {
791           if (master != null && master.isMasterRunning()) {
792             return master;
793           }
794         } catch (UndeclaredThrowableException ute) {
795           // log, but ignore, the loop below will attempt to reconnect
796           LOG.info("Exception contacting master. Retrying...", ute.getCause());
797         }
798         this.master = null;
799 
800         for (int tries = 0;
801           !this.closed && this.master == null && tries < numRetries;
802         tries++) {
803 
804           try {
805             sn = masterAddressTracker.getMasterAddress();
806             if (sn == null) {
807               LOG.info("ZooKeeper available but no active master location found");
808               throw new MasterNotRunningException();
809             }
810 
811             InetSocketAddress isa =
812               new InetSocketAddress(sn.getHostname(), sn.getPort());
813             HMasterInterface tryMaster = rpcEngine.getProxy(
814                 HMasterInterface.class, HMasterInterface.VERSION, isa, this.conf,
815                 this.rpcTimeout);
816 
817             if (tryMaster.isMasterRunning()) {
818               this.master = tryMaster;
819               this.masterLock.notifyAll();
820               break;
821             }
822 
823           } catch (IOException e) {
824             if (!shouldRetryGetMaster(tries, e)) break;
825           } catch (UndeclaredThrowableException ute) {
826             if (!shouldRetryGetMaster(tries, ute)) break;
827           }
828 
829           // Cannot connect to master or it is not running. Sleep & retry
830           try {
831             this.masterLock.wait(ConnectionUtils.getPauseTime(this.pause, tries));
832           } catch (InterruptedException e) {
833             Thread.currentThread().interrupt();
834             throw new RuntimeException("Thread was interrupted while trying to connect to master.");
835           }
836         }
837 
838         if (this.master == null) {
839           if (sn == null) {
840             throw new MasterNotRunningException();
841           }
842           throw new MasterNotRunningException(sn.toString());
843         }
844         return this.master;
845       }
846     }
847 
848     private void checkIfBaseNodeAvailable() throws MasterNotRunningException {
849       if (!masterAddressTracker.checkIfBaseNodeAvailable()) {
850         String errorMsg = "Check the value configured in 'zookeeper.znode.parent'. "
851             + "There could be a mismatch with the one configured in the master.";
852         LOG.error(errorMsg);
853         throw new MasterNotRunningException(errorMsg);
854       }
855     }
856 
857     public boolean isMasterRunning()
858     throws MasterNotRunningException, ZooKeeperConnectionException {
859       if (this.master == null) {
860         getMaster();
861       }
862       boolean isRunning = master.isMasterRunning();
863       if(isRunning) {
864         return true;
865       }
866       throw new MasterNotRunningException();
867     }
868 
869     public HRegionLocation getRegionLocation(final byte [] name,
870         final byte [] row, boolean reload)
871     throws IOException {
872       return reload? relocateRegion(name, row): locateRegion(name, row);
873     }
874 
875     public boolean isTableEnabled(byte[] tableName) throws IOException {
876       return testTableOnlineState(tableName, true);
877     }
878 
879     public boolean isTableDisabled(byte[] tableName) throws IOException {
880       return testTableOnlineState(tableName, false);
881     }
882 
883     public boolean isTableAvailable(final byte[] tableName) throws IOException {
884       final AtomicBoolean available = new AtomicBoolean(true);
885       final AtomicInteger regionCount = new AtomicInteger(0);
886       MetaScannerVisitor visitor = new MetaScannerVisitorBase() {
887         @Override
888         public boolean processRow(Result row) throws IOException {
889           byte[] value = row.getValue(HConstants.CATALOG_FAMILY,
890               HConstants.REGIONINFO_QUALIFIER);
891           HRegionInfo info = Writables.getHRegionInfoOrNull(value);
892           if (info != null && !info.isSplitParent()) {
893             if (Bytes.equals(tableName, info.getTableName())) {
894               value = row.getValue(HConstants.CATALOG_FAMILY,
895                   HConstants.SERVER_QUALIFIER);
896               if (value == null) {
897                 available.set(false);
898                 return false;
899               }
900               regionCount.incrementAndGet();
901             }
902           }
903           return true;
904         }
905       };
906       MetaScanner.metaScan(conf, this, visitor, null);
907       return available.get() && (regionCount.get() > 0);
908     }
909 
910     /*
911      * @param online If true, check that the table is enabled; else that it is disabled.
912      */
913     private boolean testTableOnlineState(byte [] tableName, boolean online)
914     throws IOException {
915       if (Bytes.equals(tableName, HConstants.ROOT_TABLE_NAME)) {
916         // The root region is always enabled
917         return online;
918       }
919       ZooKeeperWatcher zkw = getZooKeeperWatcher();
920       String tableNameStr = Bytes.toString(tableName);
921       try {
922         if (online) {
923           return ZKTableReadOnly.isEnabledTable(zkw, tableNameStr);
924         }
925         return ZKTableReadOnly.isDisabledTable(zkw, tableNameStr);
926       } catch (KeeperException e) {
927         throw new IOException("Enable/Disable failed", e);
928       }
929     }
930 
931     @Override
932     public HRegionLocation locateRegion(final byte[] regionName) throws IOException {
933       return locateRegion(HRegionInfo.getTableName(regionName),
934         HRegionInfo.getStartKey(regionName), false, true);
935     }
936 
937     @Override
938     public List<HRegionLocation> locateRegions(final byte[] tableName)
939     throws IOException {
940       return locateRegions(tableName, false, true);
941     }
942 
943     @Override
944     public List<HRegionLocation> locateRegions(final byte[] tableName, final boolean useCache,
945         final boolean offlined) throws IOException {
946       NavigableMap<HRegionInfo, ServerName> regions = MetaScanner.allTableRegions(conf, this,
947           tableName, offlined);
948       final List<HRegionLocation> locations = new ArrayList<HRegionLocation>();
949       for (HRegionInfo regionInfo : regions.keySet()) {
950         locations.add(locateRegion(tableName, regionInfo.getStartKey(), useCache, true));
951       }
952       return locations;
953     }
954 
955     public HRegionLocation locateRegion(final byte [] tableName,
956         final byte [] row)
957     throws IOException{
958       return locateRegion(tableName, row, true, true);
959     }
960 
961     public HRegionLocation relocateRegion(final byte [] tableName,
962         final byte [] row)
963     throws IOException{
964 
965       // Since this is an explicit request not to use any caching, take the
966       // opportunity to verify that the table is not disabled; this ensures an
967       // exception is thrown the first time a disabled table is interacted with.
968       if (isTableDisabled(tableName)) {
969         throw new DoNotRetryIOException(Bytes.toString(tableName) + " is disabled.");
970       }
971 
972       return locateRegion(tableName, row, false, true);
973     }
974 
975     private HRegionLocation locateRegion(final byte [] tableName,
976       final byte [] row, boolean useCache, boolean retry)
977     throws IOException {
978       if (this.closed) throw new IOException(toString() + " closed");
979       if (tableName == null || tableName.length == 0) {
980         throw new IllegalArgumentException(
981             "table name cannot be null or zero length");
982       }
983       ensureZookeeperTrackers();
984       if (Bytes.equals(tableName, HConstants.ROOT_TABLE_NAME)) {
985         try {
986           ServerName servername = this.rootRegionTracker.waitRootRegionLocation(this.rpcTimeout);
987           LOG.debug("Looked up root region location, connection=" + this +
988             "; serverName=" + ((servername == null)? "": servername.toString()));
989           if (servername == null) return null;
990           return new HRegionLocation(HRegionInfo.ROOT_REGIONINFO,
991             servername.getHostname(), servername.getPort());
992         } catch (InterruptedException e) {
993           Thread.currentThread().interrupt();
994           return null;
995         }
996       } else if (Bytes.equals(tableName, HConstants.META_TABLE_NAME)) {
997         return locateRegionInMeta(HConstants.ROOT_TABLE_NAME, tableName, row,
998             useCache, metaRegionLock, retry);
999       } else {
1000         // Region not in the cache - have to go to the meta RS
1001         return locateRegionInMeta(HConstants.META_TABLE_NAME, tableName, row,
1002             useCache, userRegionLock, retry);
1003       }
1004     }
1005 
1006     /*
1007      * Search .META. for the HRegionLocation info that contains the table and
1008      * row we're seeking. It will prefetch certain number of regions info and
1009      * save them to the global region cache.
1010      */
1011     private void prefetchRegionCache(final byte[] tableName,
1012         final byte[] row) {
1013       // Implement a new visitor for MetaScanner, and use it to walk through
1014       // the .META.
1015       MetaScannerVisitor visitor = new MetaScannerVisitorBase() {
1016         public boolean processRow(Result result) throws IOException {
1017           try {
1018             byte[] value = result.getValue(HConstants.CATALOG_FAMILY,
1019                 HConstants.REGIONINFO_QUALIFIER);
1020             HRegionInfo regionInfo = null;
1021 
1022             if (value != null) {
1023               // convert the row result into the HRegionLocation we need!
1024               regionInfo = Writables.getHRegionInfo(value);
1025 
1026               // possible we got a region of a different table...
1027               if (!Bytes.equals(regionInfo.getTableName(),
1028                   tableName)) {
1029                 return false; // stop scanning
1030               }
1031               if (regionInfo.isOffline()) {
1032                 // don't cache offline regions
1033                 return true;
1034               }
1035               value = result.getValue(HConstants.CATALOG_FAMILY,
1036                   HConstants.SERVER_QUALIFIER);
1037               if (value == null) {
1038                 return true;  // don't cache it
1039               }
1040               final String hostAndPort = Bytes.toString(value);
1041               String hostname = Addressing.parseHostname(hostAndPort);
1042               int port = Addressing.parsePort(hostAndPort);
1043               value = result.getValue(HConstants.CATALOG_FAMILY,
1044                   HConstants.STARTCODE_QUALIFIER);
1045               // instantiate the location
1046               HRegionLocation loc =
1047                 new HRegionLocation(regionInfo, hostname, port);
1048               // cache this meta entry
1049               cacheLocation(tableName, loc);
1050             }
1051             return true;
1052           } catch (RuntimeException e) {
1053             throw new IOException(e);
1054           }
1055         }
1056       };
1057       try {
1058         // pre-fetch certain number of regions info at region cache.
1059         MetaScanner.metaScan(conf, this, visitor, tableName, row,
1060             this.prefetchRegionLimit, HConstants.META_TABLE_NAME);
1061       } catch (IOException e) {
1062         // ignore during prefetch
1063       }
1064     }
1065 
1066     /*
1067      * Search one of the meta tables (-ROOT- or .META.) for the HRegionLocation
1068      * info that contains the table and row we're seeking.
1069      */
1070     private HRegionLocation locateRegionInMeta(final byte [] parentTable,
1071       final byte [] tableName, final byte [] row, boolean useCache,
1072       Object regionLockObject, boolean retry)
1073     throws IOException {
1074       HRegionLocation location;
1075       // If we are supposed to be using the cache, look in the cache to see if
1076       // we already have the region.
1077       if (useCache) {
1078         location = getCachedLocation(tableName, row);
1079         if (location != null) {
1080           return location;
1081         }
1082       }
1083 
1084       int localNumRetries = retry ? numRetries : 1;
1085       // build the key of the meta region we should be looking for.
1086       // the extra 9's on the end are necessary to allow "exact" matches
1087       // without knowing the precise region names.
1088       byte [] metaKey = HRegionInfo.createRegionName(tableName, row,
1089         HConstants.NINES, false);
1090       for (int tries = 0; true; tries++) {
1091         if (tries >= localNumRetries) {
1092           throw new NoServerForRegionException("Unable to find region for "
1093             + Bytes.toStringBinary(row) + " after " + numRetries + " tries.");
1094         }
1095 
1096         HRegionLocation metaLocation = null;
1097         try {
1098           // locate the root or meta region
1099           metaLocation = locateRegion(parentTable, metaKey, true, false);
1100           // If null still, go around again.
1101           if (metaLocation == null) continue;
1102           HRegionInterface server =
1103             getHRegionConnection(metaLocation.getHostname(), metaLocation.getPort());
1104 
1105           Result regionInfoRow = null;
1106           if (useCache) {
1107             if (Bytes.equals(parentTable, HConstants.META_TABLE_NAME)
1108                 && (getRegionCachePrefetch(tableName))) {
1109               // This block guards against two threads trying to load the meta
1110               // region at the same time. The first will load the meta region and
1111               // the second will use the value that the first one found.
1112               synchronized (regionLockObject) {
1113                 // Check the cache again for a hit in case some other thread made the
1114                 // same query while we were waiting on the lock.
1115                 location = getCachedLocation(tableName, row);
1116                 if (location != null) {
1117                   return location;
1118                 }
1119                 // If the parent table is META, we may want to pre-fetch some
1120                 // region info into the global region cache for this table.
1121                 prefetchRegionCache(tableName, row);
1122               }
1123             }
1124             location = getCachedLocation(tableName, row);
1125             if (location != null) {
1126               return location;
1127             }
1128           } else {
1129             // If we are not supposed to be using the cache, delete any existing cached location
1130             // so it won't interfere.
1131             deleteCachedLocation(tableName, row);
1132           }
1133 
1134           // Query the root or meta region for the location of the meta region
1135           regionInfoRow = server.getClosestRowBefore(
1136             metaLocation.getRegionInfo().getRegionName(), metaKey,
1137             HConstants.CATALOG_FAMILY);
1138           if (regionInfoRow == null) {
1139             throw new TableNotFoundException(Bytes.toString(tableName));
1140           }
1141           byte [] value = regionInfoRow.getValue(HConstants.CATALOG_FAMILY,
1142               HConstants.REGIONINFO_QUALIFIER);
1143           if (value == null || value.length == 0) {
1144             throw new IOException("HRegionInfo was null or empty in " +
1145               Bytes.toString(parentTable) + ", row=" + regionInfoRow);
1146           }
1147           // convert the row result into the HRegionLocation we need!
1148           HRegionInfo regionInfo = (HRegionInfo) Writables.getWritable(
1149               value, new HRegionInfo());
1150           // possible we got a region of a different table...
1151           if (!Bytes.equals(regionInfo.getTableName(), tableName)) {
1152             throw new TableNotFoundException(
1153                   "Table '" + Bytes.toString(tableName) + "' was not found, got: " +
1154                   Bytes.toString(regionInfo.getTableName()) + ".");
1155           }
1156           if (regionInfo.isSplit()) {
1157             throw new RegionOfflineException("the only available region for" +
1158               " the required row is a split parent," +
1159               " the daughters should be online soon: " +
1160               regionInfo.getRegionNameAsString());
1161           }
1162           if (regionInfo.isOffline()) {
1163             throw new RegionOfflineException("the region is offline, could" +
1164               " be caused by a disable table call: " +
1165               regionInfo.getRegionNameAsString());
1166           }
1167 
1168           value = regionInfoRow.getValue(HConstants.CATALOG_FAMILY,
1169               HConstants.SERVER_QUALIFIER);
1170           String hostAndPort = "";
1171           if (value != null) {
1172             hostAndPort = Bytes.toString(value);
1173           }
1174           if (hostAndPort.equals("")) {
1175             throw new NoServerForRegionException("No server address listed " +
1176               "in " + Bytes.toString(parentTable) + " for region " +
1177               regionInfo.getRegionNameAsString() + " containing row " +
1178               Bytes.toStringBinary(row));
1179           }
1180 
1181           // Instantiate the location
1182           String hostname = Addressing.parseHostname(hostAndPort);
1183           int port = Addressing.parsePort(hostAndPort);
1184           location = new HRegionLocation(regionInfo, hostname, port);
1185           cacheLocation(tableName, location);
1186           return location;
1187         } catch (TableNotFoundException e) {
1188           // if we got this error, probably means the table just plain doesn't
1189           // exist. rethrow the error immediately. this should always be coming
1190           // from the HTable constructor.
1191           throw e;
1192         } catch (IOException e) {
1193           if (e instanceof RemoteException) {
1194             e = RemoteExceptionHandler.decodeRemoteException((RemoteException) e);
1195           }
1196           if (tries < numRetries - 1) {
1197             if (LOG.isDebugEnabled()) {
1198               LOG.debug("locateRegionInMeta parentTable=" +
1199                 Bytes.toString(parentTable) + ", metaLocation=" +
1200                 ((metaLocation == null)? "null": "{" + metaLocation + "}") +
1201                 ", attempt=" + tries + " of " +
1202                 this.numRetries + " failed; retrying after sleep of " +
1203                 ConnectionUtils.getPauseTime(this.pause, tries) + " because: " + e.getMessage());
1204             }
1205           } else {
1206             throw e;
1207           }
1208           // Only relocate the parent region if necessary
1209           if(!(e instanceof RegionOfflineException ||
1210               e instanceof NoServerForRegionException)) {
1211             relocateRegion(parentTable, metaKey);
1212           }
1213         }
1214         try {
1215           Thread.sleep(ConnectionUtils.getPauseTime(this.pause, tries));
1216         } catch (InterruptedException e) {
1217           Thread.currentThread().interrupt();
1218           throw new IOException("Giving up trying to location region in " +
1219             "meta: thread is interrupted.");
1220         }
1221       }
1222     }
1223 
1224     /*
1225      * Search the cache for a location that fits our table and row key.
1226      * Return null if no suitable region is located. TODO: synchronization note
1227      *
1228      * <p>TODO: This method during writing consumes 15% of CPU doing lookup
1229      * into the Soft Reference SortedMap.  Improve.
1230      *
1231      * @param tableName
1232      * @param row
1233      * @return Null or region location found in cache.
1234      */
1235     HRegionLocation getCachedLocation(final byte [] tableName,
1236         final byte [] row) {
1237       SoftValueSortedMap<byte [], HRegionLocation> tableLocations =
1238         getTableLocations(tableName);
1239 
1240       // start to examine the cache. we can only do cache actions
1241       // if there's something in the cache for this table.
1242       if (tableLocations.isEmpty()) {
1243         return null;
1244       }
1245 
1246       HRegionLocation possibleRegion = tableLocations.get(row);
1247       if (possibleRegion != null) {
1248         return possibleRegion;
1249       }
1250 
1251       possibleRegion = tableLocations.lowerValueByKey(row);
1252       if (possibleRegion == null) {
1253         return null;
1254       }
1255 
1256       // make sure that the end key is greater than the row we're looking
1257       // for, otherwise the row actually belongs in the next region, not
1258       // this one. the exception case is when the endkey is
1259       // HConstants.EMPTY_END_ROW, signifying that the region we're
1260       // checking is actually the last region in the table.
1261       byte[] endKey = possibleRegion.getRegionInfo().getEndKey();
1262       if (Bytes.equals(endKey, HConstants.EMPTY_END_ROW) ||
1263           KeyValue.getRowComparator(tableName).compareRows(
1264               endKey, 0, endKey.length, row, 0, row.length) > 0) {
1265         return possibleRegion;
1266       }
1267 
1268       // Passed all the way through, so we got nothing - complete cache miss
1269       return null;
1270     }
1271 
1272     /**
1273      * Delete a cached location
1274      * @param tableName table name
1275      * @param row row key whose cached region location should be removed
1276      */
1277     void deleteCachedLocation(final byte [] tableName, final byte [] row) {
1278       synchronized (this.cachedRegionLocations) {
1279         Map<byte[], HRegionLocation> tableLocations = getTableLocations(tableName);
1280         if (!tableLocations.isEmpty()) {
1281           // start to examine the cache. we can only do cache actions
1282           // if there's something in the cache for this table.
1283           HRegionLocation rl = getCachedLocation(tableName, row);
1284           if (rl != null) {
1285             tableLocations.remove(rl.getRegionInfo().getStartKey());
1286             if (LOG.isDebugEnabled()) {
1287               LOG.debug("Removed " +
1288                 rl.getRegionInfo().getRegionNameAsString() +
1289                 " for tableName=" + Bytes.toString(tableName) +
1290                 " from cache " + "because of " + Bytes.toStringBinary(row));
1291             }
1292           }
1293         }
1294       }
1295     }
1296 
1297     @Override
1298     public void deleteCachedRegionLocation(final HRegionLocation location) {
1299       if (location == null) {
1300         return;
1301       }
1302       synchronized (this.cachedRegionLocations) {
1303         byte[] tableName = location.getRegionInfo().getTableName();
1304         Map<byte[], HRegionLocation> tableLocations = getTableLocations(tableName);
1305         if (!tableLocations.isEmpty()) {
1306           // Delete if there's something in the cache for this region.
1307           HRegionLocation removedLocation =
1308               tableLocations.remove(location.getRegionInfo().getStartKey());
1309           if (LOG.isDebugEnabled() && removedLocation != null) {
1310             LOG.debug("Removed " +
1311               location.getRegionInfo().getRegionNameAsString() +
1312               " for tableName=" + Bytes.toString(tableName) +
1313               " from cache");
1314           }
1315         }
1316       }
1317     }
1318 
1319     @Override
1320     public void clearCaches(String sn) {
1321       clearCachedLocationForServer(sn);
1322     }
1323 
1324     /*
1325      * Delete all cached region locations that map to a specific server,
1326      * across every cached table.
1327      *
1328      * @param server hostname and port of the server, as "host:port"
1329      */
1330     private void clearCachedLocationForServer(final String server) {
1331       boolean deletedSomething = false;
1332       synchronized (this.cachedRegionLocations) {
1333         if (!cachedServers.contains(server)) {
1334           return;
1335         }
1336         for (Map<byte[], HRegionLocation> tableLocations :
1337           cachedRegionLocations.values()) {
1338           for (Entry<byte[], HRegionLocation> e : tableLocations.entrySet()) {
1339             HRegionLocation value = e.getValue();
1340             if (value != null
1341                 && value.getHostnamePort().equals(server)) {
1342               tableLocations.remove(e.getKey());
1343               deletedSomething = true;
1344             }
1345           }
1346         }
1347         cachedServers.remove(server);
1348       }
1349       if (deletedSomething && LOG.isDebugEnabled()) {
1350         LOG.debug("Removed all cached region locations that map to " + server);
1351       }
1352     }
1353 
1354     /*
1355      * @param tableName
1356      * @return Map of cached locations for passed <code>tableName</code>
1357      */
1358     private SoftValueSortedMap<byte [], HRegionLocation> getTableLocations(
1359         final byte [] tableName) {
1360       // find the map of cached locations for this table
1361       HashedBytes key = new HashedBytes(tableName);
1362       SoftValueSortedMap<byte [], HRegionLocation> result;
1363       synchronized (this.cachedRegionLocations) {
1364         result = this.cachedRegionLocations.get(key);
1365         // if tableLocations for this table isn't built yet, make one
1366         if (result == null) {
1367           result = new SoftValueSortedMap<byte [], HRegionLocation>(
1368               Bytes.BYTES_COMPARATOR);
1369           this.cachedRegionLocations.put(key, result);
1370         }
1371       }
1372       return result;
1373     }
1374 
1375     @Override
1376     public void clearRegionCache() {
1377       synchronized(this.cachedRegionLocations) {
1378         this.cachedRegionLocations.clear();
1379         this.cachedServers.clear();
1380       }
1381     }
1382 
1383     @Override
1384     public void clearRegionCache(final byte [] tableName) {
1385       synchronized (this.cachedRegionLocations) {
1386         this.cachedRegionLocations.remove(new HashedBytes(tableName));
1387       }
1388     }
1389 
1390     /*
1391      * Put a newly discovered HRegionLocation into the cache.
1392      */
1393     private void cacheLocation(final byte [] tableName,
1394         final HRegionLocation location) {
1395       byte [] startKey = location.getRegionInfo().getStartKey();
1396       Map<byte [], HRegionLocation> tableLocations =
1397         getTableLocations(tableName);
1398       boolean hasNewCache = false;
1399       synchronized (this.cachedRegionLocations) {
1400         cachedServers.add(location.getHostnamePort());
1401         hasNewCache = (tableLocations.put(startKey, location) == null);
1402       }
1403       if (hasNewCache) {
1404         LOG.debug("Cached location for " +
1405             location.getRegionInfo().getRegionNameAsString() +
1406             " is " + location.getHostnamePort());
1407       }
1408     }
1409 
1410     public HRegionInterface getHRegionConnection(HServerAddress hsa)
1411     throws IOException {
1412       return getHRegionConnection(hsa, false);
1413     }
1414 
1415     @Override
1416     public HRegionInterface getHRegionConnection(final String hostname,
1417         final int port)
1418     throws IOException {
1419       return getHRegionConnection(hostname, port, false);
1420     }
1421 
1422     public HRegionInterface getHRegionConnection(HServerAddress hsa,
1423         boolean master)
1424     throws IOException {
1425       return getHRegionConnection(null, -1, hsa.getInetSocketAddress(), master);
1426     }
1427 
1428     @Override
1429     public HRegionInterface getHRegionConnection(final String hostname,
1430         final int port, final boolean master)
1431     throws IOException {
1432       return getHRegionConnection(hostname, port, null, master);
1433     }
1434 
1435     /**
1436      * Get a proxy to the region server at the given address.  Either
1437      * <code>isa</code> or <code>hostname</code> may be null, but not both.
1438      * @param hostname region server hostname; ignored when <code>isa</code> is supplied
1439      * @param port region server port; ignored when <code>isa</code> is supplied
1440      * @param isa region server address, or null to use hostname and port
1441      * @param master true to verify the master is available before connecting
1442      * @return Proxy to the region server.
1443      * @throws IOException
1444      */
1445     HRegionInterface getHRegionConnection(final String hostname, final int port,
1446         final InetSocketAddress isa, final boolean master)
1447     throws IOException {
1448       if (master) getMaster();
1449       HRegionInterface server;
1450       String rsName = null;
1451       if (isa != null) {
1452         rsName = Addressing.createHostAndPortStr(isa.getHostName(),
1453             isa.getPort());
1454       } else {
1455         rsName = Addressing.createHostAndPortStr(hostname, port);
1456       }
1457       ensureZookeeperTrackers();
1458       // See if we already have a connection (common case)
1459       server = this.servers.get(rsName);
1460       if (server == null) {
1461         // create a unique lock for this RS (if necessary)
1462         this.connectionLock.putIfAbsent(rsName, rsName);
1463         // get the RS lock
1464         synchronized (this.connectionLock.get(rsName)) {
1465           // do one more lookup in case we were stalled above
1466           server = this.servers.get(rsName);
1467           if (server == null) {
1468             try {
1469               // Only create isa when we need to.
1470               InetSocketAddress address = isa != null? isa:
1471                 new InetSocketAddress(hostname, port);
1472               // definitely a cache miss. establish an RPC for this RS
1473               server = HBaseRPC.waitForProxy(this.rpcEngine,
1474                   serverInterfaceClass, HRegionInterface.VERSION,
1475                   address, this.conf,
1476                   this.maxRPCAttempts, this.rpcTimeout, this.rpcTimeout);
1477               this.servers.put(Addressing.createHostAndPortStr(
1478                   address.getHostName(), address.getPort()), server);
1479             } catch (RemoteException e) {
1480               LOG.warn("RemoteException connecting to RS", e);
1481               // Throw what the RemoteException was carrying.
1482               throw e.unwrapRemoteException();
1483             }
1484           }
1485         }
1486       }
1487       return server;
1488     }
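         // Editor's note (not part of the original source): a hedged usage
         // sketch of the hostname/port versus InetSocketAddress contract
         // documented above; the host and port below are made up.
         //
         //   HRegionInterface a = getHRegionConnection("rs1.example.com", 60020, null, false);
         //   HRegionInterface b = getHRegionConnection(null, -1,
         //       new InetSocketAddress("rs1.example.com", 60020), false);
         //   // Both resolve to the same cached proxy, keyed by "rs1.example.com:60020".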
1489 
1490     /**
1491      * Get the ZooKeeper instance for this TableServers instance.
1492      *
1493      * If ZK has not been initialized yet, this will connect to ZK.
1494      * @return zookeeper reference
1495      * @throws ZooKeeperConnectionException if there's a problem connecting to zk
1496      */
1497     @Deprecated
1498     public synchronized ZooKeeperWatcher getZooKeeperWatcher()
1499         throws ZooKeeperConnectionException {
1500       if(zooKeeper == null) {
1501         try {
1502           if (this.closed) {
1503             throw new IOException(toString() + " closed");
1504           }
1505           this.zooKeeper = new ZooKeeperWatcher(conf, "hconnection", this);
1506         } catch(ZooKeeperConnectionException zce) {
1507           throw zce;
1508         } catch (IOException e) {
1509           throw new ZooKeeperConnectionException("An error is preventing" +
1510               " HBase from connecting to ZooKeeper", e);
1511         }
1512       }
1513       return zooKeeper;
1514     }
1515 
1516     public <T> T getRegionServerWithRetries(ServerCallable<T> callable)
1517     throws IOException, RuntimeException {
1518       return callable.withRetries();
1519     }
1520 
1521     public <T> T getRegionServerWithoutRetries(ServerCallable<T> callable)
1522     throws IOException, RuntimeException {
1523       return callable.withoutRetries();
1524     }
1525 
1526     private <R> Callable<MultiResponse> createCallable(final HRegionLocation loc,
1527         final MultiAction<R> multi, final byte [] tableName) {
1528       // TODO: This does not belong in here!!! St.Ack  HConnections should
1529       // not be dealing in Callables; Callables have HConnections, not the
1530       // other way around.
1531       final HConnection connection = this;
1532       return new Callable<MultiResponse>() {
1533        public MultiResponse call() throws IOException {
1534          ServerCallable<MultiResponse> callable =
1535            new ServerCallable<MultiResponse>(connection, tableName, null) {
1536              public MultiResponse call() throws IOException {
1537                return server.multi(multi);
1538              }
1539              @Override
1540              public void connect(boolean reload) throws IOException {
1541                server = connection.getHRegionConnection(loc.getHostname(), loc.getPort());
1542              }
1543            };
1544          return callable.withoutRetries();
1545        }
1546      };
1547    }
1548 
1549     public void processBatch(List<? extends Row> list,
1550         final byte[] tableName,
1551         ExecutorService pool,
1552         Object[] results) throws IOException, InterruptedException {
1553       // This belongs in HTable!!! Not in here.  St.Ack
1554 
1555       // results must be the same size as list
1556       if (results.length != list.size()) {
1557         throw new IllegalArgumentException("argument results must be the same size as argument list");
1558       }
1559 
1560       processBatchCallback(list, tableName, pool, results, null);
1561     }
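         // Editor's note (illustrative only, not part of the original source):
         // a minimal sketch of calling processBatch; the table name, thread
         // pool and list of Puts are assumptions.
         //
         //   List<Put> puts = buildPuts();                // hypothetical helper
         //   Object[] results = new Object[puts.size()];  // must match list size
         //   processBatch(puts, Bytes.toBytes("t1"), pool, results);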
1562 
1563     /**
1564      * Executes the given
1565      * {@link org.apache.hadoop.hbase.client.coprocessor.Batch.Call}
1566      * callable for each row in the
1567      * given list and invokes
1568      * {@link org.apache.hadoop.hbase.client.coprocessor.Batch.Callback#update(byte[], byte[], Object)}
1569      * for each result returned.
1570      *
1571      * @param protocol the protocol interface being called
1572      * @param rows a list of row keys for which the callable should be invoked
1573      * @param tableName table name for the coprocessor invoked
1574      * @param pool ExecutorService used to submit the calls per row
1575      * @param callable instance on which to invoke
1576      * {@link org.apache.hadoop.hbase.client.coprocessor.Batch.Call#call(Object)}
1577      * for each row
1578      * @param callback instance on which to invoke
1579      * {@link org.apache.hadoop.hbase.client.coprocessor.Batch.Callback#update(byte[], byte[], Object)}
1580      * for each result
1581      * @param <T> the protocol interface type
1582      * @param <R> the callable's return type
1583      * @throws IOException
1584      */
1585     public <T extends CoprocessorProtocol,R> void processExecs(
1586         final Class<T> protocol,
1587         List<byte[]> rows,
1588         final byte[] tableName,
1589         ExecutorService pool,
1590         final Batch.Call<T,R> callable,
1591         final Batch.Callback<R> callback)
1592       throws IOException, Throwable {
1593 
1594       Map<byte[],Future<R>> futures =
1595           new TreeMap<byte[],Future<R>>(Bytes.BYTES_COMPARATOR);
1596       for (final byte[] r : rows) {
1597         final ExecRPCInvoker invoker =
1598             new ExecRPCInvoker(conf, this, protocol, tableName, r);
1599         Future<R> future = pool.submit(
1600             new Callable<R>() {
1601               public R call() throws Exception {
1602                 T instance = (T)Proxy.newProxyInstance(conf.getClassLoader(),
1603                     new Class[]{protocol},
1604                     invoker);
1605                 R result = callable.call(instance);
1606                 byte[] region = invoker.getRegionName();
1607                 if (callback != null) {
1608                   callback.update(region, r, result);
1609                 }
1610                 return result;
1611               }
1612             });
1613         futures.put(r, future);
1614       }
1615       for (Map.Entry<byte[],Future<R>> e : futures.entrySet()) {
1616         try {
1617           e.getValue().get();
1618         } catch (ExecutionException ee) {
1619           LOG.warn("Error executing for row "+Bytes.toStringBinary(e.getKey()), ee);
1620           throw ee.getCause();
1621         } catch (InterruptedException ie) {
1622           Thread.currentThread().interrupt();
1623           throw new IOException("Interrupted executing for row " +
1624               Bytes.toStringBinary(e.getKey()), ie);
1625         }
1626       }
1627     }
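         // Editor's note (not part of the original source): a hedged sketch of
         // invoking processExecs with a hypothetical CoprocessorProtocol named
         // MyProtocol; every name below is an assumption.
         //
         //   processExecs(MyProtocol.class, rowKeys, Bytes.toBytes("t1"), pool,
         //       new Batch.Call<MyProtocol, Long>() {
         //         public Long call(MyProtocol instance) throws IOException {
         //           return instance.count();             // hypothetical method
         //         }
         //       },
         //       new Batch.Callback<Long>() {
         //         public void update(byte[] region, byte[] row, Long result) {
         //           // aggregate one per-region result here
         //         }
         //       });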
1628 
1629     /**
1630      * Parameterized batch processing, allowing varying return types for
1631      * different {@link Row} implementations.
1632      */
1633     public <R> void processBatchCallback(
1634         List<? extends Row> list,
1635         byte[] tableName,
1636         ExecutorService pool,
1637         Object[] results,
1638         Batch.Callback<R> callback)
1639     throws IOException, InterruptedException {
1640       // This belongs in HTable!!! Not in here.  St.Ack
1641 
1642       // results must be the same size as list
1643       if (results.length != list.size()) {
1644         throw new IllegalArgumentException(
1645             "argument results must be the same size as argument list");
1646       }
1647       if (list.isEmpty()) {
1648         return;
1649       }
1650 
1651       // Keep track of the most recent server for each item for better
1652       // exception reporting.  We keep HRegionLocation to save on parsing;
1653       // later, when lastServers is used, we pull out just what we need from
1654       // each entry.
1655       HRegionLocation [] lastServers = new HRegionLocation[results.length];
1656       List<Row> workingList = new ArrayList<Row>(list);
1657       boolean retry = true;
1658       // count that helps presize actions array
1659       int actionCount = 0;
1660 
1661       for (int tries = 0; tries < numRetries && retry; ++tries) {
1662 
1663         // sleep first, if this is a retry
1664         if (tries >= 1) {
1665           long sleepTime = ConnectionUtils.getPauseTime(this.pause, tries);
1666           LOG.debug("Retry " +tries+ ", sleep for " +sleepTime+ "ms!");
1667           Thread.sleep(sleepTime);
1668         }
1669         // step 1: break up into regionserver-sized chunks and build the data structs
1670         Map<HRegionLocation, MultiAction<R>> actionsByServer =
1671           new HashMap<HRegionLocation, MultiAction<R>>();
1672         for (int i = 0; i < workingList.size(); i++) {
1673           Row row = workingList.get(i);
1674           if (row != null) {
1675             HRegionLocation loc = locateRegion(tableName, row.getRow());
1676             byte[] regionName = loc.getRegionInfo().getRegionName();
1677 
1678             MultiAction<R> actions = actionsByServer.get(loc);
1679             if (actions == null) {
1680               actions = new MultiAction<R>();
1681               actionsByServer.put(loc, actions);
1682             }
1683 
1684             Action<R> action = new Action<R>(row, i);
1685             lastServers[i] = loc;
1686             actions.add(regionName, action);
1687           }
1688         }
1689 
1690         // step 2: make the requests
1691 
1692         Map<HRegionLocation, Future<MultiResponse>> futures =
1693             new HashMap<HRegionLocation, Future<MultiResponse>>(
1694                 actionsByServer.size());
1695 
1696         for (Entry<HRegionLocation, MultiAction<R>> e: actionsByServer.entrySet()) {
1697           futures.put(e.getKey(), pool.submit(createCallable(e.getKey(), e.getValue(), tableName)));
1698         }
1699 
1700         // step 3: collect the failures and successes and prepare for retry
1701 
1702         for (Entry<HRegionLocation, Future<MultiResponse>> responsePerServer
1703              : futures.entrySet()) {
1704           HRegionLocation loc = responsePerServer.getKey();
1705 
1706           try {
1707             Future<MultiResponse> future = responsePerServer.getValue();
1708             MultiResponse resp = future.get();
1709 
1710             if (resp == null) {
1711               // Entire server failed
1712               LOG.debug("Failed all for server: " + loc.getHostnamePort() +
1713                 ", removing from cache");
1714               continue;
1715             }
1716 
1717             for (Entry<byte[], List<Pair<Integer,Object>>> e : resp.getResults().entrySet()) {
1718               byte[] regionName = e.getKey();
1719               List<Pair<Integer, Object>> regionResults = e.getValue();
1720               for (Pair<Integer, Object> regionResult : regionResults) {
1721                 if (regionResult == null) {
1722                   // if the first/only record is 'null' the entire region failed.
1723                   LOG.debug("Failures for region: " +
1724                       Bytes.toStringBinary(regionName) +
1725                       ", removing from cache");
1726                 } else {
1727                   // Result might be an Exception, including DNRIOE
1728                   results[regionResult.getFirst()] = regionResult.getSecond();
1729                   if (callback != null && !(regionResult.getSecond() instanceof Throwable)) {
1730                     callback.update(e.getKey(),
1731                         list.get(regionResult.getFirst()).getRow(),
1732                         (R)regionResult.getSecond());
1733                   }
1734                 }
1735               }
1736             }
1737           } catch (ExecutionException e) {
1738             LOG.warn("Failed all from " + loc, e);
1739           }
1740         }
1741 
1742         // step 4: identify failures and prep for a retry (if applicable).
1743 
1744         // Find failures (i.e. null Result), and add them to the workingList (in
1745         // order), so they can be retried.
1746         retry = false;
1747         workingList.clear();
1748         actionCount = 0;
1749         for (int i = 0; i < results.length; i++) {
1750           // if null (fail) or instanceof Throwable && not instanceof DNRIOE
1751           // then retry that row; else don't.
1752           if (results[i] == null ||
1753               (results[i] instanceof Throwable &&
1754                   !(results[i] instanceof DoNotRetryIOException))) {
1755 
1756             retry = true;
1757             actionCount++;
1758             Row row = list.get(i);
1759             workingList.add(row);
1760             deleteCachedLocation(tableName, row.getRow());
1761           } else {
1762             if (results[i] != null && results[i] instanceof Throwable) {
1763               actionCount++;
1764             }
1765             // add null to workingList, so the order remains consistent with the original list argument.
1766             workingList.add(null);
1767           }
1768         }
1769       }
1770 
1771       List<Throwable> exceptions = new ArrayList<Throwable>(actionCount);
1772       List<Row> actions = new ArrayList<Row>(actionCount);
1773       List<String> addresses = new ArrayList<String>(actionCount);
1774 
1775       for (int i = 0 ; i < results.length; i++) {
1776         if (results[i] == null || results[i] instanceof Throwable) {
1777           exceptions.add((Throwable)results[i]);
1778           actions.add(list.get(i));
1779           addresses.add(lastServers[i].getHostnamePort());
1780         }
1781       }
1782 
1783       if (!exceptions.isEmpty()) {
1784         throw new RetriesExhaustedWithDetailsException(exceptions,
1785             actions,
1786             addresses);
1787       }
1788     }
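         // Editor's note (not part of the original source): on a normal return
         // from processBatchCallback, results[i] holds the server response for
         // list.get(i); when retries are exhausted, the per-row causes travel in
         // the thrown exception.  A hedged caller-side sketch:
         //
         //   try {
         //     processBatchCallback(rows, tableName, pool, results, null);
         //   } catch (RetriesExhaustedWithDetailsException e) {
         //     for (int i = 0; i < e.getNumExceptions(); i++) {
         //       LOG.warn("Row " + Bytes.toStringBinary(e.getRow(i).getRow()) +
         //         " failed", e.getCause(i));
         //     }
         //   }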
1789 
1790     /*
1791      * Return the number of cached region locations for a table.  Called only
1792      * from unit tests.
1793      */
1794     int getNumberOfCachedRegionLocations(final byte[] tableName) {
1795       synchronized (this.cachedRegionLocations) {
1796         Map<byte[], HRegionLocation> tableLocs =
1797           this.cachedRegionLocations.get(new HashedBytes(tableName));
1798 
1799         if (tableLocs == null) {
1800           return 0;
1801         }
1802         return tableLocs.values().size();
1803       }
1804     }
1805 
1806     /**
1807      * Check the region cache to see whether a region is cached yet or not.
1808      * Called by unit tests.
1809      * @param tableName tableName
1810      * @param row row
1811      * @return Region cached or not.
1812      */
1813     boolean isRegionCached(final byte[] tableName, final byte[] row) {
1814       HRegionLocation location = getCachedLocation(tableName, row);
1815       return location != null;
1816     }
1817 
1818     public void setRegionCachePrefetch(final byte[] tableName,
1819         final boolean enable) {
1820       if (!enable) {
1821         regionCachePrefetchDisabledTables.add(new HashedBytes(tableName));
1822       }
1823       else {
1824         regionCachePrefetchDisabledTables.remove(new HashedBytes(tableName));
1825       }
1826     }
1827 
1828     public boolean getRegionCachePrefetch(final byte[] tableName) {
1829       return !regionCachePrefetchDisabledTables.contains(new HashedBytes(tableName));
1830     }
1831 
1832     @Override
1833     public void prewarmRegionCache(byte[] tableName,
1834         Map<HRegionInfo, HServerAddress> regions) {
1835       for (Map.Entry<HRegionInfo, HServerAddress> e : regions.entrySet()) {
1836         HServerAddress hsa = e.getValue();
1837         if (hsa == null || hsa.getInetSocketAddress() == null) continue;
1838         cacheLocation(tableName,
1839           new HRegionLocation(e.getKey(), hsa.getHostname(), hsa.getPort()));
1840       }
1841     }
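         // Editor's note (not part of the original source): a hedged usage
         // sketch; the region map here is assumed to come from a client-side
         // scan of the table's regions, e.g. HTable.getRegionsInfo() in this
         // client version.
         //
         //   Map<HRegionInfo, HServerAddress> regions = table.getRegionsInfo();
         //   prewarmRegionCache(Bytes.toBytes("t1"), regions);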
1842 
1843     @Override
1844     public void abort(final String msg, Throwable t) {
1845       if (t instanceof KeeperException) {
1846         LOG.info("This client just lost its session with ZooKeeper; it will"
1847             + " automatically reconnect when needed.");
1848         if (t instanceof KeeperException.SessionExpiredException) {
1849           LOG.info("ZK session expired. This disconnect could have been" +
1850               " caused by a network partition or a long-running GC pause;" +
1851               " either way, it's recommended that you verify your environment.");
1852           synchronized (resetLock) {
1853             if (resetting) return;
1854             this.resetting = true;
1855           }
1856           resetZooKeeperTrackers();
1857           this.resetting = false;
1858         }
1859         return;
1860       }
1861       if (t != null) LOG.fatal(msg, t);
1862       else LOG.fatal(msg);
1863       this.aborted = true;
1864       close();
1865     }
1866 
1867     @Override
1868     public boolean isClosed() {
1869       return this.closed;
1870     }
1871 
1872     @Override
1873     public boolean isAborted(){
1874       return this.aborted;
1875     }
1876 
1877     public int getCurrentNrHRS() throws IOException {
1878       try {
1879         ZooKeeperWatcher zkw = getZooKeeperWatcher();
1880         // We go to zk rather than to master to get count of regions to avoid
1881         // HTable having a Master dependency.  See HBASE-2828.
1882         return ZKUtil.getNumberOfChildren(zkw,
1883             zkw.rsZNode);
1884       } catch (KeeperException ke) {
1885         throw new IOException("Unexpected ZooKeeper exception", ke);
1886       }
1887     }
1888 
1889     /**
1890      * Increment this client's reference count.
1891      */
1892     void incCount() {
1893       ++refCount;
1894     }
1895 
1896     /**
1897      * Decrement this client's reference count.
1898      */
1899     void decCount() {
1900       if (refCount > 0) {
1901         --refCount;
1902       }
1903     }
1904 
1905     /**
1906      * Return whether this client has no remaining references.
1907      *
1908      * @return true if this client has no reference; false otherwise
1909      */
1910     boolean isZeroReference() {
1911       return refCount == 0;
1912     }
1913 
1914     void internalClose() {
1915       if (this.closed) {
1916         return;
1917       }
1918       shutdownBatchPool();
1919       master = null;
1920 
1921       this.servers.clear();
1922       if (this.rpcEngine != null) {
1923         this.rpcEngine.close();
1924       }
1925 
1926       synchronized (this) {
1927         if (this.zooKeeper != null) {
1928           LOG.info("Closed zookeeper sessionid=0x" +
1929             Long.toHexString(this.zooKeeper.getRecoverableZooKeeper().getSessionId()));
1930           this.zooKeeper.close();
1931           this.zooKeeper = null;
1932         }
1933         this.closed = true;
1934       }
1935     }
1936 
1937     public void close() {
1938       if (managed) {
1939         if (aborted) {
1940           HConnectionManager.deleteStaleConnection(this);
1941         } else {
1942           HConnectionManager.deleteConnection(this, false);
1943         }
1944       } else {
1945         internalClose();
1946       }
1947       if (LOG.isTraceEnabled()) LOG.trace("" + this.zooKeeper + " closed.");
1948     }
1949 
1950     /**
1951      * Close the connection for good, regardless of what the current value of
1952      * {@link #refCount} is. Ideally, {@link #refCount} should be zero at this
1953      * point, which would be the case if all of its consumers close the
1954      * connection. However, on the off chance that someone is unable to close
1955      * the connection, perhaps because it bailed out prematurely, the method
1956      * below will ensure that this {@link HConnection} instance is cleaned up.
1957      * Caveat: The JVM may take an unknown amount of time to call finalize on an
1958      * unreachable object, so our hope is that every consumer cleans up after
1959      * itself, like any good citizen.
1960      */
1961     @Override
1962     protected void finalize() throws Throwable {
1963       // Pretend as if we are about to release the last remaining reference
1964       refCount = 1;
1965       close();
1966       LOG.debug("The connection to " + this.zooKeeper
1967           + " was closed by the finalize method.");
1968     }
1969 
1970     public HTableDescriptor[] listTables() throws IOException {
1971       HTableDescriptor[] htd = getMaster().getHTableDescriptors();
1972       return htd;
1973     }
1974 
1975     public HTableDescriptor[] getHTableDescriptors(List<String> tableNames) throws IOException {
1976       if (tableNames == null || tableNames.isEmpty()) return new HTableDescriptor[0];
1978       return getMaster().getHTableDescriptors(tableNames);
1979     }
1980 
1981     @Override
1982     public String[] getTableNames() throws IOException {
1983       return getMaster().getTableNames();
1984     }
1985 
1986     public HTableDescriptor getHTableDescriptor(final byte[] tableName)
1987     throws IOException {
1988       if (tableName == null || tableName.length == 0) return null;
1989       if (Bytes.equals(tableName, HConstants.ROOT_TABLE_NAME)) {
1990         return new UnmodifyableHTableDescriptor(HTableDescriptor.ROOT_TABLEDESC);
1991       }
1992       if (Bytes.equals(tableName, HConstants.META_TABLE_NAME)) {
1993         return HTableDescriptor.META_TABLEDESC;
1994       }
1995       List<String> tableNameList = new ArrayList<String>(1);
1996       tableNameList.add(Bytes.toString(tableName));
1997       HTableDescriptor[] htds = getHTableDescriptors(tableNameList);
1998       if (htds != null && htds.length > 0) {
1999         return htds[0];
2000       }
2001       throw new TableNotFoundException(Bytes.toString(tableName));
2002     }
2003   }
2004 
2005   /**
2006    * Set the number of retries to use serverside when trying to communicate
2007    * with another server over {@link HConnection}.  Used when updating catalog
2008    * tables, etc.  Call this method before any Connections are created.
2009    * @param c The Configuration instance to set the retries into.
2010    * @param log Used to log what we set in here.
2011    */
2012   public static void setServerSideHConnectionRetries(final Configuration c,
2013       final Log log) {
2014     int hcRetries = c.getInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER,
2015       HConstants.DEFAULT_HBASE_CLIENT_RETRIES_NUMBER);
2016     // Go big.  Multiply by 10.  If we can't get to meta after this many retries
2017     // then something is seriously wrong.
2018     int serversideMultiplier =
2019       c.getInt("hbase.client.serverside.retries.multiplier", 10);
2020     int retries = hcRetries * serversideMultiplier;
2021     c.setInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER, retries);
2022     log.debug("Set serverside HConnection retries=" + retries);
2023   }
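       // Editor's note (not part of the original source): a small worked example
       // of the arithmetic above, assuming the stock default of 10 client
       // retries and the default server-side multiplier of 10:
       //
       //   Configuration c = HBaseConfiguration.create();
       //   HConnectionManager.setServerSideHConnectionRetries(c, LOG);
       //   // c.getInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER, -1) is now 10 * 10 = 100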
2024 }