
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.hadoop.hbase.ipc;

import java.io.IOException;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.classification.InterfaceAudience;
import org.apache.hadoop.hbase.client.ClusterConnection;
import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
import org.apache.hadoop.hbase.protobuf.generated.ClientProtos;
import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.CoprocessorServiceResponse;
import org.apache.hadoop.hbase.util.ByteStringer;

import com.google.protobuf.Descriptors;
import com.google.protobuf.Message;
import com.google.protobuf.RpcController;

/**
 * Provides clients with an RPC connection for calling coprocessor endpoint
 * {@link com.google.protobuf.Service}s against the active master. An instance of this class may be
 * obtained by calling {@link org.apache.hadoop.hbase.client.HBaseAdmin#coprocessorService()}, but it
 * should normally be used only to create a new {@link com.google.protobuf.Service} stub for calling
 * the endpoint methods.
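 * <p>
 * A minimal usage sketch, assuming {@code admin} is an open
 * {@link org.apache.hadoop.hbase.client.HBaseAdmin} and {@code MyService} is a hypothetical
 * protobuf-generated coprocessor endpoint:
 * <pre>
 * CoprocessorRpcChannel channel = admin.coprocessorService();
 * MyService.BlockingInterface stub = MyService.newBlockingStub(channel);
 * MyResponse response = stub.myMethod(null, MyRequest.getDefaultInstance());
 * </pre>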
 * @see org.apache.hadoop.hbase.client.HBaseAdmin#coprocessorService()
 */
@InterfaceAudience.Private
public class MasterCoprocessorRpcChannel extends CoprocessorRpcChannel {
  private static final Log LOG = LogFactory.getLog(MasterCoprocessorRpcChannel.class);

  private final ClusterConnection connection;

  public MasterCoprocessorRpcChannel(ClusterConnection conn) {
    this.connection = conn;
  }

  @Override
  protected Message callExecService(RpcController controller, Descriptors.MethodDescriptor method,
      Message request, Message responsePrototype) throws IOException {
    if (LOG.isTraceEnabled()) {
      LOG.trace("Call: " + method.getName() + ", " + request.toString());
    }

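    // Wrap the request in a CoprocessorServiceCall carrying the service name, method name and
    // serialized request. The row key is left empty because the call targets the master rather
    // than a specific region.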
    final ClientProtos.CoprocessorServiceCall call =
        ClientProtos.CoprocessorServiceCall.newBuilder()
            .setRow(ByteStringer.wrap(HConstants.EMPTY_BYTE_ARRAY))
            .setServiceName(method.getService().getFullName())
            .setMethodName(method.getName())
            .setRequest(request.toByteString()).build();

    // TODO: Are we retrying here? Does not seem so. We should use RetryingRpcCaller
    CoprocessorServiceResponse result = ProtobufUtil.execService(controller,
      connection.getMaster(), call);
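    // Decode the response: if the endpoint returned a value, merge it into a builder for the
    // expected response type; otherwise fall back to the prototype's default instance.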
    Message response = null;
    if (result.getValue().hasValue()) {
      Message.Builder builder = responsePrototype.newBuilderForType();
      ProtobufUtil.mergeFrom(builder, result.getValue().getValue());
      response = builder.build();
    } else {
      response = responsePrototype.getDefaultInstanceForType();
    }
    if (LOG.isTraceEnabled()) {
      LOG.trace("Master Result is value=" + response);
    }
    return response;
  }

}