/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.hbtop;

import static org.hamcrest.CoreMatchers.is;
import static org.hamcrest.MatcherAssert.assertThat;
import static org.junit.Assert.fail;

import java.text.ParseException;
import java.util.ArrayList;
import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import org.apache.commons.lang3.time.FastDateFormat;
import org.apache.hadoop.hbase.ClusterMetrics;
import org.apache.hadoop.hbase.ClusterMetricsBuilder;
import org.apache.hadoop.hbase.RegionMetrics;
import org.apache.hadoop.hbase.RegionMetricsBuilder;
import org.apache.hadoop.hbase.ServerMetrics;
import org.apache.hadoop.hbase.ServerMetricsBuilder;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.Size;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.UserMetrics;
import org.apache.hadoop.hbase.UserMetricsBuilder;
import org.apache.hadoop.hbase.client.RegionInfoBuilder;
import org.apache.hadoop.hbase.hbtop.field.Field;
import org.apache.hadoop.hbase.hbtop.screen.top.Summary;
import org.apache.hadoop.hbase.master.RegionState;
import org.apache.hadoop.hbase.util.Bytes;
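/**
 * Utilities for hbtop tests: a factory for a fixed dummy {@link ClusterMetrics} (two live region
 * servers, one dead server, six regions and one region in transition) and assertion helpers that
 * verify the {@link Record}s each hbtop mode derives from those metrics.
 *
 * <p>
 * A typical mode test looks roughly like the following (illustrative sketch; the records and
 * summary are produced by the mode and screen under test, not by this class):
 *
 * <pre>
 * ClusterMetrics metrics = TestUtils.createDummyClusterMetrics();
 * // build records/summary from the metrics with the mode under test, then:
 * TestUtils.assertRecordsInRegionMode(records);
 * TestUtils.assertSummary(summary);
 * </pre>
 */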
public final class TestUtils {

  private TestUtils() {
  }

  public static ClusterMetrics createDummyClusterMetrics() {
    Map<ServerName, ServerMetrics> serverMetricsMap = new HashMap<>();

    // host1: three regions (table1, table2 and namespace:table3) and two users (FOO and BAR)
    List<RegionMetrics> regionMetricsList = new ArrayList<>();
    List<UserMetrics> userMetricsList = new ArrayList<>();
    userMetricsList.add(createUserMetrics("FOO", 1, 2, 4));
    userMetricsList.add(createUserMetrics("BAR", 2, 3, 3));
    regionMetricsList.add(createRegionMetrics("table1,,1.00000000000000000000000000000000.", 100,
      50, 100, new Size(100, Size.Unit.MEGABYTE), new Size(200, Size.Unit.MEGABYTE), 1,
      new Size(100, Size.Unit.MEGABYTE), 0.1f, 100, 100, "2019-07-22 00:00:00"));
    regionMetricsList.add(createRegionMetrics("table2,1,2.00000000000000000000000000000001.", 200,
      100, 200, new Size(200, Size.Unit.MEGABYTE), new Size(400, Size.Unit.MEGABYTE), 2,
      new Size(200, Size.Unit.MEGABYTE), 0.2f, 50, 200, "2019-07-22 00:00:01"));
    regionMetricsList
      .add(createRegionMetrics("namespace:table3,,3_0001.00000000000000000000000000000002.", 300,
        150, 300, new Size(300, Size.Unit.MEGABYTE), new Size(600, Size.Unit.MEGABYTE), 3,
        new Size(300, Size.Unit.MEGABYTE), 0.3f, 100, 300, "2019-07-22 00:00:02"));

    ServerName host1 = ServerName.valueOf("host1.apache.com", 1000, 1);
    serverMetricsMap.put(host1, createServerMetrics(host1, 100, new Size(100, Size.Unit.MEGABYTE),
      new Size(200, Size.Unit.MEGABYTE), 100, regionMetricsList, userMetricsList));

    // host2: reuse the lists; clearing them here is safe because ServerMetricsBuilder copies the
    // entries into its own maps, so host1's ServerMetrics is not affected
    regionMetricsList.clear();
    userMetricsList.clear();
    userMetricsList.add(createUserMetrics("FOO", 5, 7, 3));
    userMetricsList.add(createUserMetrics("BAR", 4, 8, 4));
    regionMetricsList.add(createRegionMetrics("table1,1,4.00000000000000000000000000000003.", 100,
      50, 100, new Size(100, Size.Unit.MEGABYTE), new Size(200, Size.Unit.MEGABYTE), 1,
      new Size(100, Size.Unit.MEGABYTE), 0.4f, 50, 100, "2019-07-22 00:00:03"));
    regionMetricsList.add(createRegionMetrics("table2,,5.00000000000000000000000000000004.", 200,
      100, 200, new Size(200, Size.Unit.MEGABYTE), new Size(400, Size.Unit.MEGABYTE), 2,
      new Size(200, Size.Unit.MEGABYTE), 0.5f, 150, 200, "2019-07-22 00:00:04"));
    regionMetricsList
      .add(createRegionMetrics("namespace:table3,,6.00000000000000000000000000000005.", 300, 150,
        300, new Size(300, Size.Unit.MEGABYTE), new Size(600, Size.Unit.MEGABYTE), 3,
        new Size(300, Size.Unit.MEGABYTE), 0.6f, 200, 300, "2019-07-22 00:00:05"));

    ServerName host2 = ServerName.valueOf("host2.apache.com", 1001, 2);
    serverMetricsMap.put(host2, createServerMetrics(host2, 200, new Size(16, Size.Unit.GIGABYTE),
      new Size(32, Size.Unit.GIGABYTE), 200, regionMetricsList, userMetricsList));

    // host3 is reported as a dead server, and table4 has one region in transition (OFFLINE)
    ServerName host3 = ServerName.valueOf("host3.apache.com", 1002, 3);
    return ClusterMetricsBuilder.newBuilder().setHBaseVersion("3.0.0-SNAPSHOT")
      .setClusterId("01234567-89ab-cdef-0123-456789abcdef").setLiveServerMetrics(serverMetricsMap)
      .setDeadServerNames(Collections.singletonList(host3))
      .setRegionsInTransition(Collections
        .singletonList(new RegionState(RegionInfoBuilder.newBuilder(TableName.valueOf("table4"))
          .setStartKey(new byte[0]).setEndKey(new byte[0]).setOffline(true).setReplicaId(0)
          .setRegionId(0).setSplit(false).build(), RegionState.State.OFFLINE, host3)))
      .build();
  }
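  /**
   * Builds {@link UserMetrics} for a single user with two synthetic clients whose names prefix
   * the user name with CLIENT_A_ and CLIENT_B_; both clients carry identical request counts.
   * (addClientMetris, with its unusual spelling, is the actual method name exposed by
   * {@link UserMetricsBuilder}.)
   */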
  private static UserMetrics createUserMetrics(String user, long readRequestCount,
    long writeRequestCount, long filteredReadRequestsCount) {
    return UserMetricsBuilder.newBuilder(Bytes.toBytes(user))
      .addClientMetris(new UserMetricsBuilder.ClientMetricsImpl("CLIENT_A_" + user,
        readRequestCount, writeRequestCount, filteredReadRequestsCount))
      .addClientMetris(new UserMetricsBuilder.ClientMetricsImpl("CLIENT_B_" + user,
        readRequestCount, writeRequestCount, filteredReadRequestsCount))
      .build();
  }

  @SuppressWarnings("JavaUtilDate")
  private static RegionMetrics createRegionMetrics(String regionName, long readRequestCount,
    long filteredReadRequestCount, long writeRequestCount, Size storeFileSize,
    Size uncompressedStoreFileSize, int storeFileCount, Size memStoreSize, float locality,
    long compactedCellCount, long compactingCellCount, String lastMajorCompactionTime) {

    FastDateFormat df = FastDateFormat.getInstance("yyyy-MM-dd HH:mm:ss");
    try {
      return RegionMetricsBuilder.newBuilder(Bytes.toBytes(regionName))
        .setReadRequestCount(readRequestCount).setFilteredReadRequestCount(filteredReadRequestCount)
        .setWriteRequestCount(writeRequestCount).setStoreFileSize(storeFileSize)
        .setUncompressedStoreFileSize(uncompressedStoreFileSize).setStoreFileCount(storeFileCount)
        .setMemStoreSize(memStoreSize).setDataLocality(locality)
        .setCompactedCellCount(compactedCellCount).setCompactingCellCount(compactingCellCount)
        .setLastMajorCompactionTimestamp(df.parse(lastMajorCompactionTime).getTime()).build();
    } catch (ParseException e) {
      // the timestamps are hard-coded in this class, so a parse failure is a programming error
      throw new IllegalArgumentException(e);
    }
  }

  private static ServerMetrics createServerMetrics(ServerName serverName, long reportTimestamp,
    Size usedHeapSize, Size maxHeapSize, long requestCountPerSecond,
    List<RegionMetrics> regionMetricsList, List<UserMetrics> userMetricsList) {

    return ServerMetricsBuilder.newBuilder(serverName).setReportTimestamp(reportTimestamp)
      .setUsedHeapSize(usedHeapSize).setMaxHeapSize(maxHeapSize)
      .setRequestCountPerSecond(requestCountPerSecond).setRegionMetrics(regionMetricsList)
      .setUserMetrics(userMetricsList).build();
  }
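  /**
   * Asserts the six region-mode records (three regions on each live server). All per-second rates
   * are expected to be zero because the dummy metrics never change between refreshes, and
   * COMPACTION_PROGRESS is expected to be compactedCellCount / compactingCellCount * 100, e.g.
   * 50 / 200 = 25% for table2,1,2.
   */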
  public static void assertRecordsInRegionMode(List<Record> records) {
    assertThat(records.size(), is(6));

    for (Record record : records) {
      switch (record.get(Field.REGION_NAME).asString()) {
        case "table1,,1.00000000000000000000000000000000.":
          assertRecordInRegionMode(record, "default", "1", "", "table1",
            "00000000000000000000000000000000", "host1:1000", "host1.apache.com,1000,1", 0L, 0L,
            0L, 0L, new Size(100, Size.Unit.MEGABYTE), new Size(200, Size.Unit.MEGABYTE), 1,
            new Size(100, Size.Unit.MEGABYTE), 0.1f, "", 100L, 100L, 100f, "2019-07-22 00:00:00");
          break;

        case "table1,1,4.00000000000000000000000000000003.":
          assertRecordInRegionMode(record, "default", "4", "", "table1",
            "00000000000000000000000000000003", "host2:1001", "host2.apache.com,1001,2", 0L, 0L,
            0L, 0L, new Size(100, Size.Unit.MEGABYTE), new Size(200, Size.Unit.MEGABYTE), 1,
            new Size(100, Size.Unit.MEGABYTE), 0.4f, "1", 100L, 50L, 50f, "2019-07-22 00:00:03");
          break;

        case "table2,,5.00000000000000000000000000000004.":
          assertRecordInRegionMode(record, "default", "5", "", "table2",
            "00000000000000000000000000000004", "host2:1001", "host2.apache.com,1001,2", 0L, 0L,
            0L, 0L, new Size(200, Size.Unit.MEGABYTE), new Size(400, Size.Unit.MEGABYTE), 2,
            new Size(200, Size.Unit.MEGABYTE), 0.5f, "", 200L, 150L, 75f, "2019-07-22 00:00:04");
          break;

        case "table2,1,2.00000000000000000000000000000001.":
          assertRecordInRegionMode(record, "default", "2", "", "table2",
            "00000000000000000000000000000001", "host1:1000", "host1.apache.com,1000,1", 0L, 0L,
            0L, 0L, new Size(200, Size.Unit.MEGABYTE), new Size(400, Size.Unit.MEGABYTE), 2,
            new Size(200, Size.Unit.MEGABYTE), 0.2f, "1", 200L, 50L, 25f, "2019-07-22 00:00:01");
          break;

        case "namespace:table3,,6.00000000000000000000000000000005.":
          assertRecordInRegionMode(record, "namespace", "6", "", "table3",
            "00000000000000000000000000000005", "host2:1001", "host2.apache.com,1001,2", 0L, 0L,
            0L, 0L, new Size(300, Size.Unit.MEGABYTE), new Size(600, Size.Unit.MEGABYTE), 3,
            new Size(300, Size.Unit.MEGABYTE), 0.6f, "", 300L, 200L, 66.66667f,
            "2019-07-22 00:00:05");
          break;

        case "namespace:table3,,3_0001.00000000000000000000000000000002.":
          assertRecordInRegionMode(record, "namespace", "3", "1", "table3",
            "00000000000000000000000000000002", "host1:1000", "host1.apache.com,1000,1", 0L, 0L,
            0L, 0L, new Size(300, Size.Unit.MEGABYTE), new Size(600, Size.Unit.MEGABYTE), 3,
            new Size(300, Size.Unit.MEGABYTE), 0.3f, "", 300L, 100L, 33.333336f,
            "2019-07-22 00:00:02");
          break;

        default:
          fail();
      }
    }
  }

  private static void assertRecordInRegionMode(Record record, String namespace, String startCode,
    String replicaId, String table, String region, String regionServer, String longRegionServer,
    long requestCountPerSecond, long readRequestCountPerSecond,
    long filteredReadRequestCountPerSecond, long writeCountRequestPerSecond, Size storeFileSize,
    Size uncompressedStoreFileSize, int numStoreFiles, Size memStoreSize, float locality,
    String startKey, long compactingCellCount, long compactedCellCount, float compactionProgress,
    String lastMajorCompactionTime) {
    assertThat(record.size(), is(22));
    assertThat(record.get(Field.NAMESPACE).asString(), is(namespace));
    assertThat(record.get(Field.START_CODE).asString(), is(startCode));
    assertThat(record.get(Field.REPLICA_ID).asString(), is(replicaId));
    assertThat(record.get(Field.TABLE).asString(), is(table));
    assertThat(record.get(Field.REGION).asString(), is(region));
    assertThat(record.get(Field.REGION_SERVER).asString(), is(regionServer));
    assertThat(record.get(Field.LONG_REGION_SERVER).asString(), is(longRegionServer));
    assertThat(record.get(Field.REQUEST_COUNT_PER_SECOND).asLong(), is(requestCountPerSecond));
    assertThat(record.get(Field.READ_REQUEST_COUNT_PER_SECOND).asLong(),
      is(readRequestCountPerSecond));
    assertThat(record.get(Field.FILTERED_READ_REQUEST_COUNT_PER_SECOND).asLong(),
      is(filteredReadRequestCountPerSecond));
    assertThat(record.get(Field.WRITE_REQUEST_COUNT_PER_SECOND).asLong(),
      is(writeCountRequestPerSecond));
    assertThat(record.get(Field.STORE_FILE_SIZE).asSize(), is(storeFileSize));
    assertThat(record.get(Field.UNCOMPRESSED_STORE_FILE_SIZE).asSize(),
      is(uncompressedStoreFileSize));
    assertThat(record.get(Field.NUM_STORE_FILES).asInt(), is(numStoreFiles));
    assertThat(record.get(Field.MEM_STORE_SIZE).asSize(), is(memStoreSize));
    assertThat(record.get(Field.LOCALITY).asFloat(), is(locality));
    assertThat(record.get(Field.START_KEY).asString(), is(startKey));
    assertThat(record.get(Field.COMPACTING_CELL_COUNT).asLong(), is(compactingCellCount));
    assertThat(record.get(Field.COMPACTED_CELL_COUNT).asLong(), is(compactedCellCount));
    assertThat(record.get(Field.COMPACTION_PROGRESS).asFloat(), is(compactionProgress));
    assertThat(record.get(Field.LAST_MAJOR_COMPACTION_TIME).asString(),
      is(lastMajorCompactionTime));
  }

  public static void assertRecordsInNamespaceMode(List<Record> records) {
    assertThat(records.size(), is(2));

    for (Record record : records) {
      switch (record.get(Field.NAMESPACE).asString()) {
        case "default":
          // table1 + table2 across both hosts: 4 regions, 1 + 1 + 2 + 2 = 6 store files
          assertRecordInNamespaceMode(record, 0L, 0L, 0L, 0L, new Size(600, Size.Unit.MEGABYTE),
            new Size(1200, Size.Unit.MEGABYTE), 6, new Size(600, Size.Unit.MEGABYTE), 4);
          break;

        case "namespace":
          // table3 only: 2 regions at 300 MB with 3 store files each
          assertRecordInNamespaceMode(record, 0L, 0L, 0L, 0L, new Size(600, Size.Unit.MEGABYTE),
            new Size(1200, Size.Unit.MEGABYTE), 6, new Size(600, Size.Unit.MEGABYTE), 2);
          break;

        default:
          fail();
      }
    }
  }

  private static void assertRecordInNamespaceMode(Record record, long requestCountPerSecond,
    long readRequestCountPerSecond, long filteredReadRequestCountPerSecond,
    long writeCountRequestPerSecond, Size storeFileSize, Size uncompressedStoreFileSize,
    int numStoreFiles, Size memStoreSize, int regionCount) {
    assertThat(record.size(), is(10));
    assertThat(record.get(Field.REQUEST_COUNT_PER_SECOND).asLong(), is(requestCountPerSecond));
    assertThat(record.get(Field.READ_REQUEST_COUNT_PER_SECOND).asLong(),
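  /* Expected table-mode values below are per-table sums over both hosts: e.g. default:table1 has
   * one 100 MB region with one store file on each host, hence a 200 MB store file size, 2 store
   * files and 2 regions; see assertRecordsInTableMode after this helper. */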
      is(readRequestCountPerSecond));
    assertThat(record.get(Field.FILTERED_READ_REQUEST_COUNT_PER_SECOND).asLong(),
      is(filteredReadRequestCountPerSecond));
    assertThat(record.get(Field.WRITE_REQUEST_COUNT_PER_SECOND).asLong(),
      is(writeCountRequestPerSecond));
    assertThat(record.get(Field.STORE_FILE_SIZE).asSize(), is(storeFileSize));
    assertThat(record.get(Field.UNCOMPRESSED_STORE_FILE_SIZE).asSize(),
      is(uncompressedStoreFileSize));
    assertThat(record.get(Field.NUM_STORE_FILES).asInt(), is(numStoreFiles));
    assertThat(record.get(Field.MEM_STORE_SIZE).asSize(), is(memStoreSize));
    assertThat(record.get(Field.REGION_COUNT).asInt(), is(regionCount));
  }

  public static void assertRecordsInTableMode(List<Record> records) {
    assertThat(records.size(), is(3));

    for (Record record : records) {
      String tableName = String.format("%s:%s", record.get(Field.NAMESPACE).asString(),
        record.get(Field.TABLE).asString());

      switch (tableName) {
        case "default:table1":
          assertRecordInTableMode(record, 0L, 0L, 0L, 0L, new Size(200, Size.Unit.MEGABYTE),
            new Size(400, Size.Unit.MEGABYTE), 2, new Size(200, Size.Unit.MEGABYTE), 2);
          break;

        case "default:table2":
          assertRecordInTableMode(record, 0L, 0L, 0L, 0L, new Size(400, Size.Unit.MEGABYTE),
            new Size(800, Size.Unit.MEGABYTE), 4, new Size(400, Size.Unit.MEGABYTE), 2);
          break;

        case "namespace:table3":
          assertRecordInTableMode(record, 0L, 0L, 0L, 0L, new Size(600, Size.Unit.MEGABYTE),
            new Size(1200, Size.Unit.MEGABYTE), 6, new Size(600, Size.Unit.MEGABYTE), 2);
          break;

        default:
          fail();
      }
    }
  }

  public static void assertRecordsInUserMode(List<Record> records) {
    assertThat(records.size(), is(2));
    for (Record record : records) {
      String user = record.get(Field.USER).asString();
      switch (user) {
        // readRequestPerSecond and writeRequestPerSecond will be zero
        // because there are no changed or new metrics between refreshes
        case "FOO":
        case "BAR":
          assertRecordInUserMode(record, 0L, 0L, 0L);
          break;
        default:
          fail();
      }
    }
  }
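  /**
   * Asserts the four client-mode records: each of the two users (FOO and BAR) was created with
   * two clients (CLIENT_A_ and CLIENT_B_ prefixes), giving 2 x 2 = 4 distinct clients.
   */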
  public static void assertRecordsInClientMode(List<Record> records) {
    assertThat(records.size(), is(4));
    for (Record record : records) {
      String client = record.get(Field.CLIENT).asString();
      switch (client) {
        // readRequestPerSecond and writeRequestPerSecond will be zero
        // because there are no changed or new metrics between refreshes
        case "CLIENT_A_FOO":
        case "CLIENT_A_BAR":
        case "CLIENT_B_FOO":
        case "CLIENT_B_BAR":
          assertRecordInClientMode(record, 0L, 0L, 0L);
          break;
        default:
          fail();
      }
    }
  }

  private static void assertRecordInUserMode(Record record, long readRequestCountPerSecond,
    long writeCountRequestPerSecond, long filteredReadRequestsCount) {
    assertThat(record.size(), is(6));
    assertThat(record.get(Field.READ_REQUEST_COUNT_PER_SECOND).asLong(),
      is(readRequestCountPerSecond));
    assertThat(record.get(Field.WRITE_REQUEST_COUNT_PER_SECOND).asLong(),
      is(writeCountRequestPerSecond));
    assertThat(record.get(Field.FILTERED_READ_REQUEST_COUNT_PER_SECOND).asLong(),
      is(filteredReadRequestsCount));
    // each user in the dummy metrics has exactly two clients
    assertThat(record.get(Field.CLIENT_COUNT).asInt(), is(2));
  }

  private static void assertRecordInClientMode(Record record, long readRequestCountPerSecond,
    long writeCountRequestPerSecond, long filteredReadRequestsCount) {
    assertThat(record.size(), is(6));
    assertThat(record.get(Field.READ_REQUEST_COUNT_PER_SECOND).asLong(),
      is(readRequestCountPerSecond));
    assertThat(record.get(Field.WRITE_REQUEST_COUNT_PER_SECOND).asLong(),
      is(writeCountRequestPerSecond));
    assertThat(record.get(Field.FILTERED_READ_REQUEST_COUNT_PER_SECOND).asLong(),
      is(filteredReadRequestsCount));
    // each dummy client name embeds exactly one user
    assertThat(record.get(Field.USER_COUNT).asInt(), is(1));
  }

  private static void assertRecordInTableMode(Record record, long requestCountPerSecond,
    long readRequestCountPerSecond, long filteredReadRequestCountPerSecond,
    long writeCountRequestPerSecond, Size storeFileSize, Size uncompressedStoreFileSize,
    int numStoreFiles, Size memStoreSize, int regionCount) {
    assertThat(record.size(), is(11));
    assertThat(record.get(Field.REQUEST_COUNT_PER_SECOND).asLong(), is(requestCountPerSecond));
    assertThat(record.get(Field.READ_REQUEST_COUNT_PER_SECOND).asLong(),
      is(readRequestCountPerSecond));
    assertThat(record.get(Field.FILTERED_READ_REQUEST_COUNT_PER_SECOND).asLong(),
      is(filteredReadRequestCountPerSecond));
    assertThat(record.get(Field.WRITE_REQUEST_COUNT_PER_SECOND).asLong(),
      is(writeCountRequestPerSecond));
    assertThat(record.get(Field.STORE_FILE_SIZE).asSize(), is(storeFileSize));
    assertThat(record.get(Field.UNCOMPRESSED_STORE_FILE_SIZE).asSize(),
      is(uncompressedStoreFileSize));
    assertThat(record.get(Field.NUM_STORE_FILES).asInt(), is(numStoreFiles));
    assertThat(record.get(Field.MEM_STORE_SIZE).asSize(), is(memStoreSize));
    assertThat(record.get(Field.REGION_COUNT).asInt(), is(regionCount));
  }

  public static void assertRecordsInRegionServerMode(List<Record> records) {
    assertThat(records.size(), is(2));

    for (Record record : records) {
      switch (record.get(Field.REGION_SERVER).asString()) {
        case "host1:1000":
          assertRecordInRegionServerMode(record, "host1.apache.com,1000,1", 0L, 0L, 0L, 0L,
            new Size(600, Size.Unit.MEGABYTE), new Size(1200, Size.Unit.MEGABYTE), 6,
            new Size(600, Size.Unit.MEGABYTE), 3, new Size(100, Size.Unit.MEGABYTE),
            new Size(200, Size.Unit.MEGABYTE));
          break;

        case "host2:1001":
          assertRecordInRegionServerMode(record, "host2.apache.com,1001,2", 0L, 0L, 0L, 0L,
            new Size(600, Size.Unit.MEGABYTE), new Size(1200, Size.Unit.MEGABYTE), 6,
            new Size(600, Size.Unit.MEGABYTE), 3, new Size(16, Size.Unit.GIGABYTE),
            new Size(32, Size.Unit.GIGABYTE));
          break;

        default:
          fail();
      }
    }
  }

  private static void assertRecordInRegionServerMode(Record record, String longRegionServer,
    long requestCountPerSecond, long readRequestCountPerSecond,
    long filteredReadRequestCountPerSecond, long writeCountRequestPerSecond, Size storeFileSize,
    Size uncompressedStoreFileSize, int numStoreFiles, Size memStoreSize, int regionCount,
    Size usedHeapSize, Size maxHeapSize) {
    assertThat(record.size(), is(13));
    assertThat(record.get(Field.LONG_REGION_SERVER).asString(), is(longRegionServer));
    assertThat(record.get(Field.REQUEST_COUNT_PER_SECOND).asLong(), is(requestCountPerSecond));
    assertThat(record.get(Field.READ_REQUEST_COUNT_PER_SECOND).asLong(),
      is(readRequestCountPerSecond));
    assertThat(record.get(Field.FILTERED_READ_REQUEST_COUNT_PER_SECOND).asLong(),
      is(filteredReadRequestCountPerSecond));
    assertThat(record.get(Field.WRITE_REQUEST_COUNT_PER_SECOND).asLong(),
      is(writeCountRequestPerSecond));
    assertThat(record.get(Field.STORE_FILE_SIZE).asSize(), is(storeFileSize));
    assertThat(record.get(Field.UNCOMPRESSED_STORE_FILE_SIZE).asSize(),
      is(uncompressedStoreFileSize));
    assertThat(record.get(Field.NUM_STORE_FILES).asInt(), is(numStoreFiles));
    assertThat(record.get(Field.MEM_STORE_SIZE).asSize(), is(memStoreSize));
    assertThat(record.get(Field.REGION_COUNT).asInt(), is(regionCount));
    assertThat(record.get(Field.USED_HEAP_SIZE).asSize(), is(usedHeapSize));
    assertThat(record.get(Field.MAX_HEAP_SIZE).asSize(), is(maxHeapSize));
  }
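  /**
   * Asserts the cluster summary derived from the dummy metrics: 3 servers in total (2 live plus
   * 1 dead), 6 regions, 1 region in transition, an average load of 6 regions / 2 live servers =
   * 3.0, and an aggregate rate of 100 + 200 = 300 requests per second.
   */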
  public static void assertSummary(Summary summary) {
    assertThat(summary.getVersion(), is("3.0.0-SNAPSHOT"));
    assertThat(summary.getClusterId(), is("01234567-89ab-cdef-0123-456789abcdef"));
    assertThat(summary.getServers(), is(3));
    assertThat(summary.getLiveServers(), is(2));
    assertThat(summary.getDeadServers(), is(1));
    assertThat(summary.getRegionCount(), is(6));
    assertThat(summary.getRitCount(), is(1));
    assertThat(summary.getAverageLoad(), is(3.0));
    assertThat(summary.getAggregateRequestPerSecond(), is(300L));
  }
}