/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.coprocessor;

import java.io.IOException;
import java.util.List;
import java.util.Optional;
import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;
import org.apache.commons.lang3.StringUtils;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.CoprocessorEnvironment;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Delete;
import org.apache.hadoop.hbase.client.Durability;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Row;
import org.apache.hadoop.hbase.ipc.RpcServer;
import org.apache.hadoop.hbase.metrics.MetricRegistry;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.LossyCounting;
import org.apache.hadoop.hbase.wal.WALEdit;
import org.apache.yetus.audience.InterfaceAudience;

import org.apache.hbase.thirdparty.com.google.common.collect.ImmutableMap;

/**
 * A coprocessor that collects metrics from the meta table.
 * <p>
 * These metrics will be available through the regular Hadoop metrics2 sinks (ganglia, opentsdb,
 * etc) as well as JMX output.
 * </p>
 * @see MetaTableMetrics
 */

@InterfaceAudience.Private
public class MetaTableMetrics implements RegionCoprocessor {
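
  // Usage sketch (hedged; the deployment snippet below is an assumption about configuration and
  // is not read by this class): the observer is typically enabled cluster-wide by adding this
  // class to the region coprocessor list, e.g. in hbase-site.xml:
  //
  //   <property>
  //     <name>hbase.coprocessor.region.classes</name>
  //     <value>org.apache.hadoop.hbase.coprocessor.MetaTableMetrics</value>
  //   </property>
  //
  // Every region server then loads the coprocessor, but start() below only switches it to active
  // on the region that hosts hbase:meta, so the meters surface on that region's server.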

  private ExampleRegionObserverMeta observer;
  private MetricRegistry registry;
  private LossyCounting<String> clientMetricsLossyCounting, regionMetricsLossyCounting;
  private boolean active = false;
  private Set<String> metrics = ConcurrentHashMap.newKeySet();

  enum MetaTableOps {
    GET,
    PUT,
    DELETE,
  }

  private ImmutableMap<Class<? extends Row>, MetaTableOps> opsNameMap =
    ImmutableMap.<Class<? extends Row>, MetaTableOps> builder().put(Put.class, MetaTableOps.PUT)
      .put(Get.class, MetaTableOps.GET).put(Delete.class, MetaTableOps.DELETE).build();

  class ExampleRegionObserverMeta implements RegionCoprocessor, RegionObserver {

    @Override
    public Optional<RegionObserver> getRegionObserver() {
      return Optional.of(this);
    }

    @Override
    public void preGetOp(ObserverContext<RegionCoprocessorEnvironment> e, Get get,
      List<Cell> results) throws IOException {
      registerAndMarkMetrics(e, get);
    }

    @Override
    public void prePut(ObserverContext<RegionCoprocessorEnvironment> e, Put put, WALEdit edit,
      Durability durability) throws IOException {
      registerAndMarkMetrics(e, put);
    }

    @Override
    public void preDelete(ObserverContext<RegionCoprocessorEnvironment> e, Delete delete,
      WALEdit edit, Durability durability) {
      registerAndMarkMetrics(e, delete);
    }

    private void registerAndMarkMetrics(ObserverContext<RegionCoprocessorEnvironment> e, Row row) {
      if (!active || !isMetaTableOp(e)) {
        return;
      }
      tableMetricRegisterAndMark(row);
      clientMetricRegisterAndMark();
      regionMetricRegisterAndMark(row);
      opMetricRegisterAndMark(row);
      opWithClientMetricRegisterAndMark(row);
    }
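
    // Illustration (hedged; the patterns are taken from the helper methods below, and
    // <tableName>, <regionId> and <clientIP> are placeholders rather than literal values):
    // a single Get against hbase:meta marks up to five meters in the registry, e.g.
    //
    //   MetaTable_get_request                        (opMeterName)
    //   MetaTable_table_<tableName>_request          (tableMeterName)
    //   MetaTable_region_<regionId>_lossy_request    (regionMeterName, lossy-counted)
    //   MetaTable_client_<clientIP>_lossy_request    (clientRequestMeterName, lossy-counted)
    //   MetaTable_client_<clientIP>_get_request      (opWithClientMeterName)
    //
    // Puts and Deletes follow the same scheme with "put" / "delete" in place of "get".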

    /**
     * Get the table name encoded in the row key of a meta table operation such as get, put or
     * delete.
     * @param op the operation whose row key is parsed.
     * @return the table name, or null if it cannot be parsed from the row key.
     */
    private String getTableNameFromOp(Row op) {
      final String tableRowKey = Bytes.toString(op.getRow());
      if (StringUtils.isEmpty(tableRowKey)) {
        return null;
      }
      final String[] splits = tableRowKey.split(",");
      return splits.length > 0 ? splits[0] : null;
    }

    /**
     * Get the region id encoded in the row key of a meta table operation such as get, put or
     * delete.
     * @param op the operation whose row key is parsed.
     * @return the region id, or null if it cannot be parsed from the row key.
     */
    private String getRegionIdFromOp(Row op) {
      final String tableRowKey = Bytes.toString(op.getRow());
      if (StringUtils.isEmpty(tableRowKey)) {
        return null;
      }
      final String[] splits = tableRowKey.split(",");
      return splits.length > 2 ? splits[2] : null;
    }

    private boolean isMetaTableOp(ObserverContext<RegionCoprocessorEnvironment> e) {
      return TableName.META_TABLE_NAME.equals(e.getEnvironment().getRegionInfo().getTable());
    }

    private void clientMetricRegisterAndMark() {
      // Mark client metric
      String clientIP = RpcServer.getRemoteIp() != null ?
        RpcServer.getRemoteIp().toString() : null;
      if (clientIP == null || clientIP.isEmpty()) {
        return;
      }
      String clientRequestMeter = clientRequestMeterName(clientIP);
      clientMetricsLossyCounting.add(clientRequestMeter);
      registerAndMarkMeter(clientRequestMeter);
    }

    private void tableMetricRegisterAndMark(Row op) {
      // Mark table metric
      String tableName = getTableNameFromOp(op);
      if (tableName == null || tableName.isEmpty()) {
        return;
      }
      String tableRequestMeter = tableMeterName(tableName);
      registerAndMarkMeter(tableRequestMeter);
    }

    private void regionMetricRegisterAndMark(Row op) {
      // Mark region metric
      String regionId = getRegionIdFromOp(op);
      if (regionId == null || regionId.isEmpty()) {
        return;
      }
      String regionRequestMeter = regionMeterName(regionId);
      regionMetricsLossyCounting.add(regionRequestMeter);
      registerAndMarkMeter(regionRequestMeter);
    }

    private void opMetricRegisterAndMark(Row op) {
      // Mark access type ["get", "put", "delete"] metric
      String opMeterName = opMeterName(op);
      if (opMeterName == null || opMeterName.isEmpty()) {
        return;
      }
      registerAndMarkMeter(opMeterName);
    }

    private void opWithClientMetricRegisterAndMark(Object op) {
      // Mark client + access type metric
      String opWithClientMeterName = opWithClientMeterName(op);
      if (opWithClientMeterName == null || opWithClientMeterName.isEmpty()) {
        return;
      }
      registerAndMarkMeter(opWithClientMeterName);
    }

    // Helper to register the meter if it is not already present, then mark it.
    private void registerAndMarkMeter(String requestMeter) {
      if (requestMeter.isEmpty()) {
        return;
      }
      if (!registry.get(requestMeter).isPresent()) {
        metrics.add(requestMeter);
      }
      registry.meter(requestMeter).mark();
    }

    private String opWithClientMeterName(Object op) {
      // Build the meter name containing the client IP and the access type
      String clientIP = RpcServer.getRemoteIp() != null ?
        RpcServer.getRemoteIp().toString() : "";
      if (clientIP.isEmpty()) {
        return "";
      }
      MetaTableOps ops = opsNameMap.get(op.getClass());
      if (ops == null) {
        return "";
      }
      switch (ops) {
        case GET:
          return String.format("MetaTable_client_%s_get_request", clientIP);
        case PUT:
          return String.format("MetaTable_client_%s_put_request", clientIP);
        case DELETE:
          return String.format("MetaTable_client_%s_delete_request", clientIP);
        default:
          return "";
      }
    }

    private String opMeterName(Object op) {
      // Build the meter name containing the access type
      MetaTableOps ops = opsNameMap.get(op.getClass());
      if (ops == null) {
        return "";
      }
      switch (ops) {
        case GET:
          return "MetaTable_get_request";
        case PUT:
          return "MetaTable_put_request";
        case DELETE:
          return "MetaTable_delete_request";
        default:
          return "";
      }
    }

    private String tableMeterName(String tableName) {
      // Build the meter name containing the table name
      return String.format("MetaTable_table_%s_request", tableName);
    }

    private String clientRequestMeterName(String clientIP) {
      // Build the meter name containing the client IP
      if (clientIP.isEmpty()) {
        return "";
      }
      return String.format("MetaTable_client_%s_lossy_request", clientIP);
    }

    private String regionMeterName(String regionId) {
      // Build the meter name containing the region ID
      return String.format("MetaTable_region_%s_lossy_request", regionId);
    }
  }

  @Override
  public Optional<RegionObserver> getRegionObserver() {
    return Optional.of(observer);
  }
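
  // A hedged note on the lifecycle handled below (a summary of this code, not additional
  // behavior): start() activates the coprocessor only for regions of hbase:meta and registers
  // meters in the region-server-level registry, so the metrics show up on whichever server is
  // currently hosting meta. Per-client and per-region meters are tracked through LossyCounting,
  // which is expected to drop rarely-seen keys over time; the listener then removes the matching
  // meter from the registry so the meter set stays bounded, and stop() clears whatever is left
  // when the meta region moves away.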
  @Override
  public void start(CoprocessorEnvironment env) throws IOException {
    observer = new ExampleRegionObserverMeta();
    if (
      env instanceof RegionCoprocessorEnvironment
        && ((RegionCoprocessorEnvironment) env).getRegionInfo().getTable() != null
        && ((RegionCoprocessorEnvironment) env).getRegionInfo().getTable()
          .equals(TableName.META_TABLE_NAME)
    ) {
      RegionCoprocessorEnvironment regionCoprocessorEnv = (RegionCoprocessorEnvironment) env;
      registry = regionCoprocessorEnv.getMetricRegistryForRegionServer();
      LossyCounting.LossyCountingListener<String> listener = key -> {
        registry.remove(key);
        metrics.remove(key);
      };
      final Configuration conf = regionCoprocessorEnv.getConfiguration();
      clientMetricsLossyCounting = new LossyCounting<>("clientMetaMetrics", conf, listener);
      regionMetricsLossyCounting = new LossyCounting<>("regionMetaMetrics", conf, listener);
      // Only switch to active mode when this region belongs to the meta table.
      active = true;
    }
  }

  @Override
  public void stop(CoprocessorEnvironment env) throws IOException {
    // Since the meta region can move around, clear stale metrics on stop.
    for (String metric : metrics) {
      registry.remove(metric);
    }
  }
}