/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.ipc;

import com.google.protobuf.BlockingRpcChannel;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.classification.InterfaceAudience;
import org.apache.hadoop.hbase.security.User;

import java.io.Closeable;
import java.io.IOException;

/**
 * Interface for RpcClient implementations so ConnectionManager can handle it.
 */
@InterfaceAudience.Private
public interface RpcClient extends Closeable {

  // NOTE: interface fields are implicitly public static final and interface methods
  // implicitly public abstract; the redundant modifiers present in the previous
  // revision have been dropped (Checkstyle: RedundantModifier). Nothing about the
  // type's binary interface changes.

  /** Configuration key: how long (ms) a server stays in the failed-servers list. */
  String FAILED_SERVER_EXPIRY_KEY = "hbase.ipc.client.failed.servers.expiry";
  /** Default value for {@link #FAILED_SERVER_EXPIRY_KEY}, in milliseconds. */
  int FAILED_SERVER_EXPIRY_DEFAULT = 2000;
  /** Configuration key: minimum idle time before an unused connection is closed. */
  String IDLE_TIME = "hbase.ipc.client.connection.minIdleTimeBeforeClose";
  /** Configuration key: whether the client may fall back to SIMPLE authentication. */
  String IPC_CLIENT_FALLBACK_TO_SIMPLE_AUTH_ALLOWED_KEY =
      "hbase.ipc.client.fallback-to-simple-auth-allowed";
  /** Default value for {@link #IPC_CLIENT_FALLBACK_TO_SIMPLE_AUTH_ALLOWED_KEY}. */
  boolean IPC_CLIENT_FALLBACK_TO_SIMPLE_AUTH_ALLOWED_DEFAULT = false;
  /** Configuration key: whether to use a dedicated thread for writing. */
  String SPECIFIC_WRITE_THREAD = "hbase.ipc.client.specificThreadForWriting";
  /** Configuration key: the default RPC codec class for the client. */
  String DEFAULT_CODEC_CLASS = "hbase.client.default.rpc.codec";

  /** Configuration key: socket connect timeout. */
  String SOCKET_TIMEOUT_CONNECT = "hbase.ipc.client.socket.timeout.connect";
  /**
   * How long we wait when we wait for an answer. It's not the operation time, it's the time
   * we wait when we start to receive an answer, when the remote write starts to send the data.
   */
  String SOCKET_TIMEOUT_READ = "hbase.ipc.client.socket.timeout.read";
  /** Configuration key: socket write timeout. */
  String SOCKET_TIMEOUT_WRITE = "hbase.ipc.client.socket.timeout.write";
  /** Default value for {@link #SOCKET_TIMEOUT_CONNECT}: 10 seconds. */
  int DEFAULT_SOCKET_TIMEOUT_CONNECT = 10000;
  /** Default value for {@link #SOCKET_TIMEOUT_READ}: 20 seconds. */
  int DEFAULT_SOCKET_TIMEOUT_READ = 20000;
  /** Default value for {@link #SOCKET_TIMEOUT_WRITE}: 60 seconds. */
  int DEFAULT_SOCKET_TIMEOUT_WRITE = 60000;

  // Used by the server, for compatibility with old clients.
  // The client in 0.99+ does not ping the server.
  int PING_CALL_ID = -1;

  /**
   * Creates a "channel" that can be used by a blocking protobuf service. Useful setting up
   * protobuf blocking stubs.
   *
   * @param sn server name describing location of server
   * @param user which is to use the connection
   * @param rpcTimeout default rpc operation timeout
   * @return A blocking rpc channel that goes via this rpc client instance.
   * @throws IOException when channel could not be created
   */
  BlockingRpcChannel createBlockingRpcChannel(ServerName sn, User user, int rpcTimeout)
      throws IOException;

  /**
   * Interrupt the connections to the given server. This should be called if the server
   * is known as actually dead. This will not prevent current operation to be retried, and,
   * depending on their own behavior, they may retry on the same server. This can be a feature,
   * for example at startup. In any case, they're likely to get connection refused (if the
   * process died) or no route to host: i.e. their next retries should be faster and with a
   * safe exception.
   *
   * @param sn server location to cancel connections of
   */
  void cancelConnections(ServerName sn);

  /**
   * Stop all threads related to this client. No further calls may be made
   * using this client.
   */
  @Override
  void close();

  /**
   * @return true when this client uses a {@link org.apache.hadoop.hbase.codec.Codec} and so
   *     supports cell blocks.
   */
  boolean hasCellBlockSupport();
}