/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.hbtop.mode;

import edu.umd.cs.findbugs.annotations.Nullable;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.stream.Collectors;
import org.apache.commons.lang3.time.FastDateFormat;
import org.apache.hadoop.hbase.ClusterMetrics;
import org.apache.hadoop.hbase.RegionMetrics;
import org.apache.hadoop.hbase.ServerMetrics;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.RegionInfo;
import org.apache.hadoop.hbase.hbtop.Record;
import org.apache.hadoop.hbase.hbtop.RecordFilter;
import org.apache.hadoop.hbase.hbtop.field.Field;
import org.apache.hadoop.hbase.hbtop.field.FieldInfo;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.yetus.audience.InterfaceAudience;

/**
 * Implementation for {@link ModeStrategy} for Region Mode.
 */
@InterfaceAudience.Private
public final class RegionModeStrategy implements ModeStrategy {

  private final List<FieldInfo> fieldInfos = Arrays.asList(
    new FieldInfo(Field.REGION_NAME, 0, false), new FieldInfo(Field.NAMESPACE, 0, true),
    new FieldInfo(Field.TABLE, 0, true), new FieldInfo(Field.START_CODE, 13, false),
    new FieldInfo(Field.REPLICA_ID, 5, false), new FieldInfo(Field.REGION, 32, true),
    new FieldInfo(Field.REGION_SERVER, 0, true), new FieldInfo(Field.LONG_REGION_SERVER, 0, false),
    new FieldInfo(Field.REQUEST_COUNT_PER_SECOND, 8, true),
    new FieldInfo(Field.READ_REQUEST_COUNT_PER_SECOND, 8, true),
    new FieldInfo(Field.FILTERED_READ_REQUEST_COUNT_PER_SECOND, 8, true),
    new FieldInfo(Field.WRITE_REQUEST_COUNT_PER_SECOND, 8, true),
    new FieldInfo(Field.STORE_FILE_SIZE, 10, true),
    new FieldInfo(Field.UNCOMPRESSED_STORE_FILE_SIZE, 12, false),
    new FieldInfo(Field.NUM_STORE_FILES, 4, true), new FieldInfo(Field.MEM_STORE_SIZE, 8, true),
    new FieldInfo(Field.LOCALITY, 8, true), new FieldInfo(Field.START_KEY, 0, false),
    new FieldInfo(Field.COMPACTING_CELL_COUNT, 12, false),
    new FieldInfo(Field.COMPACTED_CELL_COUNT, 12, false),
    new FieldInfo(Field.COMPACTION_PROGRESS, 7, false),
    new FieldInfo(Field.LAST_MAJOR_COMPACTION_TIME, 19, false));

  private final Map<String, RequestCountPerSecond> requestCountPerSecondMap = new HashMap<>();

  RegionModeStrategy() {
  }

  @Override
  public List<FieldInfo> getFieldInfos() {
    return fieldInfos;
  }

  @Override
  public Field getDefaultSortField() {
    return Field.REQUEST_COUNT_PER_SECOND;
  }

  @Override
  public List<Record> getRecords(ClusterMetrics clusterMetrics,
    List<RecordFilter> pushDownFilters) {
    List<Record> ret = new ArrayList<>();
    for (ServerMetrics sm : clusterMetrics.getLiveServerMetrics().values()) {
      long lastReportTimestamp = sm.getLastReportTimestamp();
      for (RegionMetrics rm : sm.getRegionMetrics().values()) {
        ret.add(createRecord(sm, rm, lastReportTimestamp));
      }
    }
    return ret;
  }

  private Record createRecord(ServerMetrics serverMetrics, RegionMetrics regionMetrics,
    long lastReportTimestamp) {

    Record.Builder builder = Record.builder();

    String regionName = regionMetrics.getNameAsString();
    builder.put(Field.REGION_NAME, regionName);

    String namespaceName = "";
    String tableName = "";
    String region = "";
    String startKey = "";
    String startCode = "";
    String replicaId = "";
    try {
      byte[][] elements = RegionInfo.parseRegionName(regionMetrics.getRegionName());
      TableName tn = TableName.valueOf(elements[0]);
      namespaceName = tn.getNamespaceAsString();
      tableName = tn.getQualifierAsString();
      startKey = Bytes.toStringBinary(elements[1]);
      startCode = Bytes.toString(elements[2]);
      replicaId =
        elements.length == 4 ? Integer.valueOf(Bytes.toString(elements[3])).toString() : "";
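      // The encoded region name is a short hash derived from the full region name; it is the
      // form that appears, for example, as the region's directory name and in the web UI.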
      region = RegionInfo.encodeRegionName(regionMetrics.getRegionName());
    } catch (IOException ignored) {
      // Exception deliberately ignored
    }

    builder.put(Field.NAMESPACE, namespaceName);
    builder.put(Field.TABLE, tableName);
    builder.put(Field.START_CODE, startCode);
    builder.put(Field.REPLICA_ID, replicaId);
    builder.put(Field.REGION, region);
    builder.put(Field.START_KEY, startKey);
    builder.put(Field.REGION_SERVER, serverMetrics.getServerName().toShortString());
    builder.put(Field.LONG_REGION_SERVER, serverMetrics.getServerName().getServerName());

    // Per-second rates are computed from the deltas between consecutive metrics reports; the
    // counters are kept in an instance-level map keyed by region name so they survive refreshes.
    RequestCountPerSecond requestCountPerSecond = requestCountPerSecondMap.get(regionName);
    if (requestCountPerSecond == null) {
      requestCountPerSecond = new RequestCountPerSecond();
      requestCountPerSecondMap.put(regionName, requestCountPerSecond);
    }
    requestCountPerSecond.refresh(lastReportTimestamp, regionMetrics.getReadRequestCount(),
      regionMetrics.getFilteredReadRequestCount(), regionMetrics.getWriteRequestCount());

    builder.put(Field.READ_REQUEST_COUNT_PER_SECOND,
      requestCountPerSecond.getReadRequestCountPerSecond());
    builder.put(Field.FILTERED_READ_REQUEST_COUNT_PER_SECOND,
      requestCountPerSecond.getFilteredReadRequestCountPerSecond());
    builder.put(Field.WRITE_REQUEST_COUNT_PER_SECOND,
      requestCountPerSecond.getWriteRequestCountPerSecond());
    builder.put(Field.REQUEST_COUNT_PER_SECOND, requestCountPerSecond.getRequestCountPerSecond());

    builder.put(Field.STORE_FILE_SIZE, regionMetrics.getStoreFileSize());
    builder.put(Field.UNCOMPRESSED_STORE_FILE_SIZE, regionMetrics.getUncompressedStoreFileSize());
    builder.put(Field.NUM_STORE_FILES, regionMetrics.getStoreFileCount());
    builder.put(Field.MEM_STORE_SIZE, regionMetrics.getMemStoreSize());
    builder.put(Field.LOCALITY, regionMetrics.getDataLocality());

    // Compaction progress is the percentage of cells already compacted out of the cells
    // scheduled for compaction.
    long compactingCellCount = regionMetrics.getCompactingCellCount();
    long compactedCellCount = regionMetrics.getCompactedCellCount();
    float compactionProgress = 0;
    if (compactedCellCount > 0) {
      compactionProgress = 100 * ((float) compactedCellCount / compactingCellCount);
    }

    builder.put(Field.COMPACTING_CELL_COUNT, compactingCellCount);
    builder.put(Field.COMPACTED_CELL_COUNT, compactedCellCount);
    builder.put(Field.COMPACTION_PROGRESS, compactionProgress);
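    // A last major compaction timestamp of 0 indicates that no major compaction has been
    // recorded for the region, so it is rendered as an empty string rather than as the epoch.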
"" : df.format(lastMajorCompactionTimestamp)); 168 169 return builder.build(); 170 } 171 172 /** 173 * Form new record list with records formed by only fields provided through fieldInfo and add a 174 * count field for each record with value 1 We are doing two operation of selecting and adding new 175 * field because of saving some CPU cycles on rebuilding the record again 176 * @param fieldInfos List of FieldInfos required in the record 177 * @param records List of records which needs to be processed 178 * @param countField Field which needs to be added with value 1 for each record 179 * @return records after selecting required fields and adding count field 180 */ 181 List<Record> selectModeFieldsAndAddCountField(List<FieldInfo> fieldInfos, List<Record> records, 182 Field countField) { 183 184 return records.stream().map( 185 record -> Record.ofEntries(fieldInfos.stream().filter(fi -> record.containsKey(fi.getField())) 186 .map(fi -> Record.entry(fi.getField(), record.get(fi.getField()))))) 187 .map(record -> Record.builder().putAll(record).put(countField, 1).build()) 188 .collect(Collectors.toList()); 189 } 190 191 @Nullable 192 @Override 193 public DrillDownInfo drillDown(Record selectedRecord) { 194 // do nothing 195 return null; 196 } 197}