View Javadoc

1   /**
2    *
3    * Licensed to the Apache Software Foundation (ASF) under one
4    * or more contributor license agreements.  See the NOTICE file
5    * distributed with this work for additional information
6    * regarding copyright ownership.  The ASF licenses this file
7    * to you under the Apache License, Version 2.0 (the
8    * "License"); you may not use this file except in compliance
9    * with the License.  You may obtain a copy of the License at
10   *
11   *     http://www.apache.org/licenses/LICENSE-2.0
12   *
13   * Unless required by applicable law or agreed to in writing, software
14   * distributed under the License is distributed on an "AS IS" BASIS,
15   * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
16   * See the License for the specific language governing permissions and
17   * limitations under the License.
18   */
19  package org.apache.hadoop.hbase.client;
20  
21  import java.io.Closeable;
22  import java.io.IOException;
23  import java.io.InterruptedIOException;
24  import java.lang.reflect.Constructor;
25  import java.lang.reflect.UndeclaredThrowableException;
26  import java.net.SocketException;
27  import java.util.ArrayList;
28  import java.util.Date;
29  import java.util.HashSet;
30  import java.util.LinkedHashMap;
31  import java.util.List;
32  import java.util.Map;
33  import java.util.Map.Entry;
34  import java.util.NavigableMap;
35  import java.util.Set;
36  import java.util.concurrent.ConcurrentHashMap;
37  import java.util.concurrent.ConcurrentMap;
38  import java.util.concurrent.ConcurrentSkipListMap;
39  import java.util.concurrent.ConcurrentSkipListSet;
40  import java.util.concurrent.CopyOnWriteArraySet;
41  import java.util.concurrent.ExecutorService;
42  import java.util.concurrent.LinkedBlockingQueue;
43  import java.util.concurrent.ThreadPoolExecutor;
44  import java.util.concurrent.TimeUnit;
45  import java.util.concurrent.atomic.AtomicBoolean;
46  import java.util.concurrent.atomic.AtomicInteger;
47  
48  import org.apache.commons.logging.Log;
49  import org.apache.commons.logging.LogFactory;
50  import org.apache.hadoop.classification.InterfaceAudience;
51  import org.apache.hadoop.classification.InterfaceStability;
52  import org.apache.hadoop.conf.Configuration;
53  import org.apache.hadoop.hbase.Chore;
54  import org.apache.hadoop.hbase.HBaseConfiguration;
55  import org.apache.hadoop.hbase.HConstants;
56  import org.apache.hadoop.hbase.HRegionInfo;
57  import org.apache.hadoop.hbase.HRegionLocation;
58  import org.apache.hadoop.hbase.HTableDescriptor;
59  import org.apache.hadoop.hbase.MasterNotRunningException;
60  import org.apache.hadoop.hbase.RegionTooBusyException;
61  import org.apache.hadoop.hbase.ServerName;
62  import org.apache.hadoop.hbase.Stoppable;
63  import org.apache.hadoop.hbase.TableName;
64  import org.apache.hadoop.hbase.TableNotEnabledException;
65  import org.apache.hadoop.hbase.TableNotFoundException;
66  import org.apache.hadoop.hbase.ZooKeeperConnectionException;
67  import org.apache.hadoop.hbase.client.AsyncProcess.AsyncRequestFuture;
68  import org.apache.hadoop.hbase.client.MetaScanner.MetaScannerVisitor;
69  import org.apache.hadoop.hbase.client.MetaScanner.MetaScannerVisitorBase;
70  import org.apache.hadoop.hbase.client.coprocessor.Batch;
71  import org.apache.hadoop.hbase.exceptions.RegionMovedException;
72  import org.apache.hadoop.hbase.exceptions.RegionOpeningException;
73  import org.apache.hadoop.hbase.ipc.RpcClient;
74  import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
75  import org.apache.hadoop.hbase.protobuf.RequestConverter;
76  import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.AdminService;
77  import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ClientService;
78  import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.CoprocessorServiceRequest;
79  import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.CoprocessorServiceResponse;
80  import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.*;
81  import org.apache.hadoop.hbase.regionserver.RegionServerStoppedException;
82  import org.apache.hadoop.hbase.security.User;
83  import org.apache.hadoop.hbase.security.UserProvider;
84  import org.apache.hadoop.hbase.util.Bytes;
85  import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
86  import org.apache.hadoop.hbase.util.ExceptionUtil;
87  import org.apache.hadoop.hbase.util.Threads;
88  import org.apache.hadoop.hbase.zookeeper.MasterAddressTracker;
89  import org.apache.hadoop.hbase.zookeeper.ZKUtil;
90  import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
91  import org.apache.hadoop.ipc.RemoteException;
92  import org.apache.zookeeper.KeeperException;
93  
94  import com.google.common.annotations.VisibleForTesting;
95  import com.google.protobuf.BlockingRpcChannel;
96  import com.google.protobuf.RpcController;
97  import com.google.protobuf.ServiceException;
98  
99  /**
100  * A non-instantiable class that manages creation of {@link HConnection}s.
101  * <p>The simplest way to use this class is by using {@link #createConnection(Configuration)}.
102  * This creates a new {@link HConnection} to the cluster that is managed by the caller.
103  * From this {@link HConnection} {@link HTableInterface} implementations are retrieved
104  * with {@link HConnection#getTable(byte[])}. Example:
105  * <pre>
106  * {@code
107  * HConnection connection = HConnectionManager.createConnection(config);
108  * HTableInterface table = connection.getTable("table1");
109  * try {
110  *   // Use the table as needed, for a single operation and a single thread
111  * } finally {
112  *   table.close();
113  *   connection.close();
114  * }
115  * }</pre>
116  * <p>The following logic and API will be removed in the future:
117  * <p>This class has a static Map of {@link HConnection} instances keyed by
118  * {@link Configuration}; all invocations of {@link #getConnection(Configuration)}
119  * that pass the same {@link Configuration} instance will be returned the same
120  * {@link  HConnection} instance (Adding properties to a Configuration
121  * instance does not change its object identity; for more on how this is done see
122  * {@link HConnectionKey}).  Sharing {@link HConnection}
123  * instances is usually what you want; all clients of the {@link HConnection}
124  * instances share the HConnections' cache of Region locations rather than each
125  * having to discover for itself the location of meta, etc.  It makes
126  * sense for the likes of the pool of HTables class {@link HTablePool}, for
127  * instance (If concerned that a single {@link HConnection} is insufficient
128  * for sharing amongst clients in say an heavily-multithreaded environment,
129  * in practice it's not proven to be an issue.  Besides, {@link HConnection} is
130  * implemented atop Hadoop RPC and as of this writing, Hadoop RPC does a
131  * connection per cluster-member, exclusively).
132  *
133  * <p>But sharing connections makes clean up of {@link HConnection} instances a little awkward.
134  * Currently, clients cleanup by calling {@link #deleteConnection(Configuration)}. This will
135  * shutdown the zookeeper connection the HConnection was using and clean up all
136  * HConnection resources as well as stopping proxies to servers out on the
137  * cluster. Not running the cleanup will not end the world; it'll
138  * just stall the closeup some and spew some zookeeper connection failed
139  * messages into the log.  Running the cleanup on a {@link HConnection} that is
140  * subsequently used by another will cause breakage so be careful running
141  * cleanup.
142  * <p>To create a {@link HConnection} that is not shared by others, you can
143  * create a new {@link Configuration} instance, pass this new instance to
144  * {@link #getConnection(Configuration)}, and then when done, close it up by
145  * doing something like the following:
146  * <pre>
147  * {@code
148  * Configuration newConfig = new Configuration(originalConf);
149  * HConnection connection = HConnectionManager.getConnection(newConfig);
150  * // Use the connection to your hearts' delight and then when done...
151  * HConnectionManager.deleteConnection(newConfig, true);
152  * }
153  * </pre>
154  * <p>Cleanup used to be done inside in a shutdown hook.  On startup we'd
155  * register a shutdown hook that called {@link #deleteAllConnections()}
156  * on its way out, but the order in which shutdown hooks run is not defined, which
157  * was problematic for clients of HConnection that wanted to register their
158  * own shutdown hooks; we therefore removed ours, though this shifts the onus for
159  * cleanup to the client.
160  */
161 @SuppressWarnings("serial")
162 @InterfaceAudience.Public
163 @InterfaceStability.Evolving
164 public class HConnectionManager {
165 
166   @Deprecated
167   public static final String RETRIES_BY_SERVER_KEY =
168       ConnectionManager.RETRIES_BY_SERVER_KEY;
169 
170   @Deprecated
171   public static final int MAX_CACHED_CONNECTION_INSTANCES =
172       ConnectionManager.MAX_CACHED_CONNECTION_INSTANCES;
173 
174   /*
175    * Non-instantiable.
176    */
177   private HConnectionManager() {
178     super();
179   }
180 
181   /**
182    * Get the connection that goes with the passed <code>conf</code> configuration instance.
183    * If no current connection exists, method creates a new connection and keys it using
184    * connection-specific properties from the passed {@link Configuration}; see
185    * {@link HConnectionKey}.
186    * @param conf configuration
187    * @return HConnection object for <code>conf</code>
188    * @throws ZooKeeperConnectionException
189    */
190   public static HConnection getConnection(final Configuration conf) throws IOException {
191     return ConnectionManager.getConnectionInternal(conf);
192   }
193 
194   /**
195    * Create a new HConnection instance using the passed <code>conf</code> instance.
196    * <p>Note: This bypasses the usual HConnection life cycle management done by
197    * {@link #getConnection(Configuration)}. The caller is responsible for
198    * calling {@link HConnection#close()} on the returned connection instance.
199    *
200    * This is the recommended way to create HConnections.
201    * {@code
202    * HConnection connection = HConnectionManager.createConnection(conf);
203    * HTableInterface table = connection.getTable("mytable");
204    * table.get(...);
205    * ...
206    * table.close();
207    * connection.close();
208    * }
209    *
210    * @param conf configuration
211    * @return HConnection object for <code>conf</code>
212    * @throws ZooKeeperConnectionException
213    */
214   public static HConnection createConnection(Configuration conf) throws IOException {
215     return ConnectionManager.createConnectionInternal(conf);
216   }
217 
218 
219   /**
220    * Create a new HConnection instance using the passed <code>conf</code> instance.
221    * <p>Note: This bypasses the usual HConnection life cycle management done by
222    * {@link #getConnection(Configuration)}. The caller is responsible for
223    * calling {@link HConnection#close()} on the returned connection instance.
224    * This is the recommended way to create HConnections.
225    * {@code
226    * ExecutorService pool = ...;
227    * HConnection connection = HConnectionManager.createConnection(conf, pool);
228    * HTableInterface table = connection.getTable("mytable");
229    * table.get(...);
230    * ...
231    * table.close();
232    * connection.close();
233    * }
234    * @param conf configuration
235    * @param pool the thread pool to use for batch operation in HTables used via this HConnection
236    * @return HConnection object for <code>conf</code>
237    * @throws ZooKeeperConnectionException
238    */
239   public static HConnection createConnection(Configuration conf, ExecutorService pool)
240       throws IOException {
241     return ConnectionManager.createConnection(conf, pool);
242   }
243 
244   /**
245    * Create a new HConnection instance using the passed <code>conf</code> instance.
246    * <p>Note: This bypasses the usual HConnection life cycle management done by
247    * {@link #getConnection(Configuration)}. The caller is responsible for
248    * calling {@link HConnection#close()} on the returned connection instance.
249    * This is the recommended way to create HConnections.
250    * {@code
251    * ExecutorService pool = ...;
252    * HConnection connection = HConnectionManager.createConnection(conf, pool);
253    * HTableInterface table = connection.getTable("mytable");
254    * table.get(...);
255    * ...
256    * table.close();
257    * connection.close();
258    * }
259    * @param conf configuration
260    * @param user the user the connection is for
261    * @return HConnection object for <code>conf</code>
262    * @throws ZooKeeperConnectionException
263    */
264   public static HConnection createConnection(Configuration conf, User user)
265   throws IOException {
266     return ConnectionManager.createConnection(conf, user);
267   }
268 
269   /**
270    * Create a new HConnection instance using the passed <code>conf</code> instance.
271    * <p>Note: This bypasses the usual HConnection life cycle management done by
272    * {@link #getConnection(Configuration)}. The caller is responsible for
273    * calling {@link HConnection#close()} on the returned connection instance.
274    * This is the recommended way to create HConnections.
275    * {@code
276    * ExecutorService pool = ...;
277    * HConnection connection = HConnectionManager.createConnection(conf, pool);
278    * HTableInterface table = connection.getTable("mytable");
279    * table.get(...);
280    * ...
281    * table.close();
282    * connection.close();
283    * }
284    * @param conf configuration
285    * @param pool the thread pool to use for batch operation in HTables used via this HConnection
286    * @param user the user the connection is for
287    * @return HConnection object for <code>conf</code>
288    * @throws ZooKeeperConnectionException
289    */
290   public static HConnection createConnection(Configuration conf, ExecutorService pool, User user)
291   throws IOException {
292     return ConnectionManager.createConnection(conf, pool, user);
293   }
294 
295   @Deprecated
296   static HConnection createConnection(final Configuration conf, final boolean managed)
297       throws IOException {
298     return ConnectionManager.createConnection(conf, managed);
299   }
300 
301   @Deprecated
302   static ClusterConnection createConnection(final Configuration conf, final boolean managed,
303       final ExecutorService pool, final User user) throws IOException {
304     return ConnectionManager.createConnection(conf, managed, pool, user);
305   }
306 
307   /**
308    * Delete connection information for the instance specified by passed configuration.
309    * If there are no more references to the designated connection connection, this method will
310    * then close connection to the zookeeper ensemble and let go of all associated resources.
311    *
312    * @param conf configuration whose identity is used to find {@link HConnection} instance.
313    * @deprecated
314    */
315   public static void deleteConnection(Configuration conf) {
316     ConnectionManager.deleteConnection(conf);
317   }
318 
319   /**
320    * Cleanup a known stale connection.
321    * This will then close connection to the zookeeper ensemble and let go of all resources.
322    *
323    * @param connection
324    * @deprecated
325    */
326   public static void deleteStaleConnection(HConnection connection) {
327     ConnectionManager.deleteStaleConnection(connection);
328   }
329 
330   /**
331    * Delete information for all connections. Close or not the connection, depending on the
332    *  staleConnection boolean and the ref count. By default, you should use it with
333    *  staleConnection to true.
334    * @deprecated
335    */
336   public static void deleteAllConnections(boolean staleConnection) {
337     ConnectionManager.deleteAllConnections(staleConnection);
338   }
339 
340   /**
341    * Delete information for all connections..
342    * @deprecated kept for backward compatibility, but the behavior is broken. HBASE-8983
343    */
344   @Deprecated
345   public static void deleteAllConnections() {
346     ConnectionManager.deleteAllConnections();
347   }
348 
349   /**
350    * This convenience method invokes the given {@link HConnectable#connect}
351    * implementation using a {@link HConnection} instance that lasts just for the
352    * duration of the invocation.
353    *
354    * @param <T> the return type of the connect method
355    * @param connectable the {@link HConnectable} instance
356    * @return the value returned by the connect method
357    * @throws IOException
358    * @deprecated Internal method, do not use thru HConnectionManager.
359    */
360   @InterfaceAudience.Private
361   @Deprecated
362   public static <T> T execute(HConnectable<T> connectable) throws IOException {
363     return ConnectionManager.execute(connectable);
364   }
365 
366   /**
367    * Set the number of retries to use serverside when trying to communicate
368    * with another server over {@link HConnection}.  Used updating catalog
369    * tables, etc.  Call this method before we create any Connections.
370    * @param c The Configuration instance to set the retries into.
371    * @param log Used to log what we set in here.
372    * @deprecated Internal method, do not use.
373    */
374   @InterfaceAudience.Private
375   @Deprecated
376   public static void setServerSideHConnectionRetries(
377       final Configuration c, final String sn, final Log log) {
378     ConnectionUtils.setServerSideHConnectionRetriesConfig(c, sn, log);
379   }
380 }