/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.hbtop.mode;

import edu.umd.cs.findbugs.annotations.Nullable;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.stream.Collectors;

import org.apache.commons.lang3.time.FastDateFormat;
import org.apache.hadoop.hbase.ClusterMetrics;
import org.apache.hadoop.hbase.RegionMetrics;
import org.apache.hadoop.hbase.ServerMetrics;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.RegionInfo;
import org.apache.hadoop.hbase.hbtop.Record;
import org.apache.hadoop.hbase.hbtop.RecordFilter;
import org.apache.hadoop.hbase.hbtop.field.Field;
import org.apache.hadoop.hbase.hbtop.field.FieldInfo;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.yetus.audience.InterfaceAudience;


/**
 * Implementation for {@link ModeStrategy} for Region Mode.
 */
@InterfaceAudience.Private
public final class RegionModeStrategy implements ModeStrategy {

  private final List<FieldInfo> fieldInfos = Arrays.asList(
    new FieldInfo(Field.REGION_NAME, 0, false),
    new FieldInfo(Field.NAMESPACE, 0, true),
    new FieldInfo(Field.TABLE, 0, true),
    new FieldInfo(Field.START_CODE, 13, false),
    new FieldInfo(Field.REPLICA_ID, 5, false),
    new FieldInfo(Field.REGION, 32, true),
    new FieldInfo(Field.REGION_SERVER, 0, true),
    new FieldInfo(Field.LONG_REGION_SERVER, 0, false),
    new FieldInfo(Field.REQUEST_COUNT_PER_SECOND, 8, true),
    new FieldInfo(Field.READ_REQUEST_COUNT_PER_SECOND, 8, true),
    new FieldInfo(Field.FILTERED_READ_REQUEST_COUNT_PER_SECOND, 8, true),
    new FieldInfo(Field.WRITE_REQUEST_COUNT_PER_SECOND, 8, true),
    new FieldInfo(Field.STORE_FILE_SIZE, 10, true),
    new FieldInfo(Field.UNCOMPRESSED_STORE_FILE_SIZE, 12, false),
    new FieldInfo(Field.NUM_STORE_FILES, 4, true),
    new FieldInfo(Field.MEM_STORE_SIZE, 8, true),
    new FieldInfo(Field.LOCALITY, 8, true),
    new FieldInfo(Field.START_KEY, 0, false),
    new FieldInfo(Field.COMPACTING_CELL_COUNT, 12, false),
    new FieldInfo(Field.COMPACTED_CELL_COUNT, 12, false),
    new FieldInfo(Field.COMPACTION_PROGRESS, 7, false),
    new FieldInfo(Field.LAST_MAJOR_COMPACTION_TIME, 19, false)
  );

  // Per-region request rate trackers, keyed by region name
  private final Map<String, RequestCountPerSecond> requestCountPerSecondMap = new HashMap<>();

  RegionModeStrategy() {
  }

  @Override
  public List<FieldInfo> getFieldInfos() {
    return fieldInfos;
  }

  @Override
  public Field getDefaultSortField() {
    return Field.REQUEST_COUNT_PER_SECOND;
  }

  @Override
  public List<Record> getRecords(ClusterMetrics clusterMetrics,
    List<RecordFilter> pushDownFilters) {
    List<Record> ret = new ArrayList<>();
    for (ServerMetrics sm : clusterMetrics.getLiveServerMetrics().values()) {
      long lastReportTimestamp = sm.getLastReportTimestamp();
      for (RegionMetrics rm : sm.getRegionMetrics().values()) {
        ret.add(createRecord(sm, rm, lastReportTimestamp));
      }
    }
    return ret;
  }

  private Record createRecord(ServerMetrics serverMetrics, RegionMetrics regionMetrics,
    long lastReportTimestamp) {

    Record.Builder builder = Record.builder();

    String regionName = regionMetrics.getNameAsString();
    builder.put(Field.REGION_NAME, regionName);

    String namespaceName = "";
    String tableName = "";
    String region = "";
    String startKey = "";
    String startCode = "";
    String replicaId = "";
    try {
      byte[][] elements = RegionInfo.parseRegionName(regionMetrics.getRegionName());
      TableName tn = TableName.valueOf(elements[0]);
      namespaceName = tn.getNamespaceAsString();
      tableName = tn.getQualifierAsString();
      startKey = Bytes.toStringBinary(elements[1]);
      startCode = Bytes.toString(elements[2]);
      replicaId = elements.length == 4 ?
        Integer.valueOf(Bytes.toString(elements[3])).toString() : "";
      region = RegionInfo.encodeRegionName(regionMetrics.getRegionName());
    } catch (IOException ignored) {
    }

    builder.put(Field.NAMESPACE, namespaceName);
    builder.put(Field.TABLE, tableName);
    builder.put(Field.START_CODE, startCode);
    builder.put(Field.REPLICA_ID, replicaId);
    builder.put(Field.REGION, region);
    builder.put(Field.START_KEY, startKey);
    builder.put(Field.REGION_SERVER, serverMetrics.getServerName().toShortString());
    builder.put(Field.LONG_REGION_SERVER, serverMetrics.getServerName().getServerName());

    RequestCountPerSecond requestCountPerSecond = requestCountPerSecondMap.get(regionName);
    if (requestCountPerSecond == null) {
      requestCountPerSecond = new RequestCountPerSecond();
      requestCountPerSecondMap.put(regionName, requestCountPerSecond);
    }
    requestCountPerSecond.refresh(lastReportTimestamp, regionMetrics.getReadRequestCount(),
      regionMetrics.getFilteredReadRequestCount(), regionMetrics.getWriteRequestCount());

    builder.put(Field.READ_REQUEST_COUNT_PER_SECOND,
      requestCountPerSecond.getReadRequestCountPerSecond());
    builder.put(Field.FILTERED_READ_REQUEST_COUNT_PER_SECOND,
      requestCountPerSecond.getFilteredReadRequestCountPerSecond());
    builder.put(Field.WRITE_REQUEST_COUNT_PER_SECOND,
      requestCountPerSecond.getWriteRequestCountPerSecond());
    builder.put(Field.REQUEST_COUNT_PER_SECOND,
      requestCountPerSecond.getRequestCountPerSecond());

    builder.put(Field.STORE_FILE_SIZE, regionMetrics.getStoreFileSize());
    builder.put(Field.UNCOMPRESSED_STORE_FILE_SIZE, regionMetrics.getUncompressedStoreFileSize());
    builder.put(Field.NUM_STORE_FILES, regionMetrics.getStoreFileCount());
    builder.put(Field.MEM_STORE_SIZE, regionMetrics.getMemStoreSize());
    builder.put(Field.LOCALITY, regionMetrics.getDataLocality());

    // Compaction progress as the percentage of compacting cells already compacted
    long compactingCellCount = regionMetrics.getCompactingCellCount();
    long compactedCellCount = regionMetrics.getCompactedCellCount();
    float compactionProgress = 0;
    if (compactedCellCount > 0) {
      compactionProgress = 100 * ((float) compactedCellCount / compactingCellCount);
    }

    builder.put(Field.COMPACTING_CELL_COUNT, compactingCellCount);
    builder.put(Field.COMPACTED_CELL_COUNT, compactedCellCount);
    builder.put(Field.COMPACTION_PROGRESS, compactionProgress);

    FastDateFormat df = FastDateFormat.getInstance("yyyy-MM-dd HH:mm:ss");
    long lastMajorCompactionTimestamp = regionMetrics.getLastMajorCompactionTimestamp();

    builder.put(Field.LAST_MAJOR_COMPACTION_TIME,
      lastMajorCompactionTimestamp == 0 ?
"" : df.format(lastMajorCompactionTimestamp)); 176 177 return builder.build(); 178 } 179 180 /** 181 * Form new record list with records formed by only fields provided through fieldInfo and 182 * add a count field for each record with value 1 183 * We are doing two operation of selecting and adding new field 184 * because of saving some CPU cycles on rebuilding the record again 185 * 186 * @param fieldInfos List of FieldInfos required in the record 187 * @param records List of records which needs to be processed 188 * @param countField Field which needs to be added with value 1 for each record 189 * @return records after selecting required fields and adding count field 190 */ 191 List<Record> selectModeFieldsAndAddCountField(List<FieldInfo> fieldInfos, List<Record> records, 192 Field countField) { 193 194 return records.stream().map(record -> Record.ofEntries( 195 fieldInfos.stream().filter(fi -> record.containsKey(fi.getField())) 196 .map(fi -> Record.entry(fi.getField(), record.get(fi.getField()))))) 197 .map(record -> Record.builder().putAll(record).put(countField, 1).build()) 198 .collect(Collectors.toList()); 199 } 200 201 @Nullable 202 @Override 203 public DrillDownInfo drillDown(Record selectedRecord) { 204 // do nothing 205 return null; 206 } 207}