/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.hadoop.hbase.client.metrics;

import java.util.concurrent.atomic.AtomicLong;

import org.apache.hadoop.hbase.classification.InterfaceAudience;
import org.apache.hadoop.hbase.classification.InterfaceStability;

/**
 * Provides metrics related to scan operations (both server side and client side metrics).
 * <p>
 * The data can be passed to the MapReduce framework or other systems.
 * We use atomic longs so that one thread can increment while another
 * atomically resets to zero after the values are reported to Hadoop's
 * counters.
 * <p>
 * Some of these metrics are general to any client operation, such as put;
 * however, there is currently no need for them elsewhere, so they are
 * defined under the scan operation for now.
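 * <p>
 * As an illustrative sketch of the intended increment/reset pattern (the
 * reporting thread shown here is hypothetical, not part of this class):
 * <pre>{@code
 * ScanMetrics metrics = new ScanMetrics();
 *
 * // scanning thread: record progress as it happens
 * metrics.countOfRPCcalls.incrementAndGet();
 * long bytesInResult = 1024L; // hypothetical size of a returned Result
 * metrics.countOfBytesInResults.addAndGet(bytesInResult);
 *
 * // reporting thread: atomically read and reset before publishing to Hadoop counters
 * long rpcCalls = metrics.countOfRPCcalls.getAndSet(0);
 * long bytes = metrics.countOfBytesInResults.getAndSet(0);
 * }</pre>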
 */
@InterfaceAudience.Public
@InterfaceStability.Evolving
public class ScanMetrics extends ServerSideScanMetrics {

  // AtomicLongs to hold the metrics values. These are all updated through ClientScanner and
  // ScannerCallable. They are atomic longs so that atomic getAndSet can be used to reset the
  // values after progress is passed to Hadoop's counters.

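  // Note: createCounter(String) is inherited from ServerSideScanMetrics. Each call is
  // expected to register the named counter with the parent class and return the
  // AtomicLong that backs it, so the fields below and the reported counters share storage.
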
  /**
   * number of RPC calls
   */
  public final AtomicLong countOfRPCcalls = createCounter("RPC_CALLS");

  /**
   * number of remote RPC calls
   */
  public final AtomicLong countOfRemoteRPCcalls = createCounter("REMOTE_RPC_CALLS");

  /**
   * sum of milliseconds between sequential next calls
   */
  public final AtomicLong sumOfMillisSecBetweenNexts = createCounter("MILLIS_BETWEEN_NEXTS");

  /**
   * number of NotServingRegionExceptions caught
   */
  public final AtomicLong countOfNSRE = createCounter("NOT_SERVING_REGION_EXCEPTION");

  /**
   * number of bytes in Result objects from region servers
   */
  public final AtomicLong countOfBytesInResults = createCounter("BYTES_IN_RESULTS");

  /**
   * number of bytes in Result objects from remote region servers
   */
  public final AtomicLong countOfBytesInRemoteResults = createCounter("BYTES_IN_REMOTE_RESULTS");

  /**
   * number of regions scanned
   */
  public final AtomicLong countOfRegions = createCounter("REGIONS_SCANNED");

  /**
   * number of RPC retries
   */
  public final AtomicLong countOfRPCRetries = createCounter("RPC_RETRIES");

  /**
   * number of remote RPC retries
   */
  public final AtomicLong countOfRemoteRPCRetries = createCounter("REMOTE_RPC_RETRIES");

  /**
   * constructor
   */
  public ScanMetrics() {
  }
}