/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.quotas;

import java.io.IOException;
import java.util.Arrays;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Optional;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.DoNotRetryIOException;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.TableNotDisabledException;
import org.apache.hadoop.hbase.TableNotEnabledException;
import org.apache.hadoop.hbase.TableNotFoundException;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.Delete;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.Mutation;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
import org.apache.hadoop.hbase.util.Pair;
import org.apache.yetus.audience.InterfaceAudience;
import org.apache.yetus.audience.InterfaceStability;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos;
import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.TimeUnit;
import org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos;
import org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.QuotaScope;
import org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.Quotas;
import org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.Throttle;
import org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.TimedQuota;

/**
 * Helper class to interact with the quota table
 */
@InterfaceAudience.Private
@InterfaceStability.Evolving
public class QuotaUtil extends QuotaTableUtil {
  private static final Logger LOG = LoggerFactory.getLogger(QuotaUtil.class);

  public static final String QUOTA_CONF_KEY = "hbase.quota.enabled";
  private static final boolean QUOTA_ENABLED_DEFAULT = false;

  public static final String READ_CAPACITY_UNIT_CONF_KEY = "hbase.quota.read.capacity.unit";
  // the default read capacity unit is 1024 bytes (1KB)
  public static final long DEFAULT_READ_CAPACITY_UNIT = 1024;
  public static final String WRITE_CAPACITY_UNIT_CONF_KEY = "hbase.quota.write.capacity.unit";
  // the default write capacity unit is 1024 bytes (1KB)
  public static final long DEFAULT_WRITE_CAPACITY_UNIT = 1024;

  /*
   * The defaults below, if configured, are applied to otherwise unthrottled users. For example,
   * set `hbase.quota.default.user.machine.read.size` to `1048576` in your hbase-site.xml to ensure
   * that any given user may not query more than 1 MB per second from any given machine, unless
   * explicitly permitted by a persisted quota. All of these defaults use TimeUnit.SECONDS and
   * QuotaScope.MACHINE.
   */
  public static final String QUOTA_DEFAULT_USER_MACHINE_READ_NUM =
    "hbase.quota.default.user.machine.read.num";
  public static final String QUOTA_DEFAULT_USER_MACHINE_READ_SIZE =
    "hbase.quota.default.user.machine.read.size";
  public static final String QUOTA_DEFAULT_USER_MACHINE_REQUEST_NUM =
    "hbase.quota.default.user.machine.request.num";
  public static final String QUOTA_DEFAULT_USER_MACHINE_REQUEST_SIZE =
    "hbase.quota.default.user.machine.request.size";
  public static final String QUOTA_DEFAULT_USER_MACHINE_WRITE_NUM =
    "hbase.quota.default.user.machine.write.num";
  public static final String QUOTA_DEFAULT_USER_MACHINE_WRITE_SIZE =
    "hbase.quota.default.user.machine.write.size";
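
  /*
   * Both quota support itself and the per-user defaults above are plain Configuration properties.
   * For example, to enable quotas and cap otherwise unthrottled users at 1 MB of reads per second
   * per machine, hbase-site.xml could contain (values are illustrative):
   *
   *   <property>
   *     <name>hbase.quota.enabled</name>
   *     <value>true</value>
   *   </property>
   *   <property>
   *     <name>hbase.quota.default.user.machine.read.size</name>
   *     <value>1048576</value>
   *   </property>
   */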

  /** Table descriptor for Quota internal table */
  public static final TableDescriptor QUOTA_TABLE_DESC =
    TableDescriptorBuilder.newBuilder(QUOTA_TABLE_NAME)
      .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(QUOTA_FAMILY_INFO)
        .setScope(HConstants.REPLICATION_SCOPE_LOCAL).setBloomFilterType(BloomType.ROW)
        .setMaxVersions(1).build())
      .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(QUOTA_FAMILY_USAGE)
        .setScope(HConstants.REPLICATION_SCOPE_LOCAL).setBloomFilterType(BloomType.ROW)
        .setMaxVersions(1).build())
      .build();

  /** Returns true if the support for quota is enabled */
  public static boolean isQuotaEnabled(final Configuration conf) {
    return conf.getBoolean(QUOTA_CONF_KEY, QUOTA_ENABLED_DEFAULT);
  }

  /*
   * ========================================================================= Quota "settings"
   * helpers
   */
  public static void addTableQuota(final Connection connection, final TableName table,
    final Quotas data) throws IOException {
    addQuotas(connection, getTableRowKey(table), data);
  }

  public static void deleteTableQuota(final Connection connection, final TableName table)
    throws IOException {
    deleteQuotas(connection, getTableRowKey(table));
  }

  public static void addNamespaceQuota(final Connection connection, final String namespace,
    final Quotas data) throws IOException {
    addQuotas(connection, getNamespaceRowKey(namespace), data);
  }

  public static void deleteNamespaceQuota(final Connection connection, final String namespace)
    throws IOException {
    deleteQuotas(connection, getNamespaceRowKey(namespace));
  }

  public static void addUserQuota(final Connection connection, final String user, final Quotas data)
    throws IOException {
    addQuotas(connection, getUserRowKey(user), data);
  }

  public static void addUserQuota(final Connection connection, final String user,
    final TableName table, final Quotas data) throws IOException {
    addQuotas(connection, getUserRowKey(user), getSettingsQualifierForUserTable(table), data);
  }

  public static void addUserQuota(final Connection connection, final String user,
    final String namespace, final Quotas data) throws IOException {
    addQuotas(connection, getUserRowKey(user), getSettingsQualifierForUserNamespace(namespace),
      data);
  }

  public static void deleteUserQuota(final Connection connection, final String user)
    throws IOException {
    deleteQuotas(connection, getUserRowKey(user));
  }

  public static void deleteUserQuota(final Connection connection, final String user,
    final TableName table) throws IOException {
    deleteQuotas(connection, getUserRowKey(user), getSettingsQualifierForUserTable(table));
  }

  public static void deleteUserQuota(final Connection connection, final String user,
    final String namespace) throws IOException {
    deleteQuotas(connection, getUserRowKey(user), getSettingsQualifierForUserNamespace(namespace));
  }

  public static void addRegionServerQuota(final Connection connection, final String regionServer,
    final Quotas data) throws IOException {
    addQuotas(connection, getRegionServerRowKey(regionServer), data);
  }

  public static void deleteRegionServerQuota(final Connection connection, final String regionServer)
    throws IOException {
    deleteQuotas(connection, getRegionServerRowKey(regionServer));
  }

  public static OperationQuota.OperationType getQuotaOperationType(ClientProtos.Action action,
    boolean hasCondition) {
    if (action.hasMutation()) {
      return getQuotaOperationType(action.getMutation(), hasCondition);
    }
    return OperationQuota.OperationType.GET;
  }

  public static OperationQuota.OperationType
    getQuotaOperationType(ClientProtos.MutateRequest mutateRequest) {
    return getQuotaOperationType(mutateRequest.getMutation(), mutateRequest.hasCondition());
  }

  // Conditional mutations, appends and increments all read the current value before writing, so
  // they are charged as CHECK_AND_MUTATE; any other mutation is a plain MUTATE.
  private static OperationQuota.OperationType
    getQuotaOperationType(ClientProtos.MutationProto mutationProto, boolean hasCondition) {
    ClientProtos.MutationProto.MutationType mutationType = mutationProto.getMutateType();
    if (
      hasCondition || mutationType == ClientProtos.MutationProto.MutationType.APPEND
        || mutationType == ClientProtos.MutationProto.MutationType.INCREMENT
    ) {
      return OperationQuota.OperationType.CHECK_AND_MUTATE;
    }
    return OperationQuota.OperationType.MUTATE;
  }

  protected static void switchExceedThrottleQuota(final Connection connection,
    boolean exceedThrottleQuotaEnabled) throws IOException {
    if (exceedThrottleQuotaEnabled) {
      checkRSQuotaToEnableExceedThrottle(
        getRegionServerQuota(connection, QuotaTableUtil.QUOTA_REGION_SERVER_ROW_KEY));
    }

    Put put = new Put(getExceedThrottleQuotaRowKey());
    put.addColumn(QUOTA_FAMILY_INFO, QUOTA_QUALIFIER_SETTINGS,
      Bytes.toBytes(exceedThrottleQuotaEnabled));
    doPut(connection, put);
  }

  private static void checkRSQuotaToEnableExceedThrottle(Quotas quotas) throws IOException {
    if (quotas != null && quotas.hasThrottle()) {
      Throttle throttle = quotas.getThrottle();
      // To enable exceed throttle quota, there must be at least one read (req/read + num/size/cu)
      // and one write (req/write + num/size/cu) region server throttle quota.
      boolean hasReadQuota = false;
      boolean hasWriteQuota = false;
      if (throttle.hasReqNum() || throttle.hasReqSize() || throttle.hasReqCapacityUnit()) {
        hasReadQuota = true;
        hasWriteQuota = true;
      }
      if (
        !hasReadQuota
          && (throttle.hasReadNum() || throttle.hasReadSize() || throttle.hasReadCapacityUnit())
      ) {
        hasReadQuota = true;
      }
      if (!hasReadQuota) {
        throw new DoNotRetryIOException(
          "Please set at least one read region server quota before enabling exceed throttle quota");
      }
      if (
        !hasWriteQuota
          && (throttle.hasWriteNum() || throttle.hasWriteSize() || throttle.hasWriteCapacityUnit())
      ) {
        hasWriteQuota = true;
      }
      if (!hasWriteQuota) {
        throw new DoNotRetryIOException("Please set at least one write region server quota "
          + "before enabling exceed throttle quota");
      }
      // To enable exceed throttle quota, the region server throttle quotas must also use the
      // seconds time unit. Once previous requests have exceeded their quota and consumed the
      // region server quota, a quota in a longer time unit could take a long time to refill,
      // which would affect later requests.
      List<Pair<Boolean, TimedQuota>> list =
        Arrays.asList(Pair.newPair(throttle.hasReqNum(), throttle.getReqNum()),
          Pair.newPair(throttle.hasReadNum(), throttle.getReadNum()),
          Pair.newPair(throttle.hasWriteNum(), throttle.getWriteNum()),
          Pair.newPair(throttle.hasReqSize(), throttle.getReqSize()),
          Pair.newPair(throttle.hasReadSize(), throttle.getReadSize()),
          Pair.newPair(throttle.hasWriteSize(), throttle.getWriteSize()),
          Pair.newPair(throttle.hasReqCapacityUnit(), throttle.getReqCapacityUnit()),
          Pair.newPair(throttle.hasReadCapacityUnit(), throttle.getReadCapacityUnit()),
          Pair.newPair(throttle.hasWriteCapacityUnit(), throttle.getWriteCapacityUnit()));
      for (Pair<Boolean, TimedQuota> pair : list) {
        if (pair.getFirst()) {
          if (pair.getSecond().getTimeUnit() != TimeUnit.SECONDS) {
            throw new DoNotRetryIOException("All region server quotas must be "
              + "in the seconds time unit when enabling exceed throttle quota");
          }
        }
      }
    } else {
      // Exceed throttle quota can only be enabled if a region server quota is already set
      throw new DoNotRetryIOException(
        "Please set a region server quota before enabling exceed throttle quota");
    }
  }
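
  /*
   * The checks above are satisfied by defining per-second read and write region server throttles
   * before switching the exceed throttle quota on, e.g. from the HBase shell (limits are
   * illustrative):
   *
   *   set_quota TYPE => THROTTLE, REGIONSERVER => 'all', THROTTLE_TYPE => READ, LIMIT => '2000req/sec'
   *   set_quota TYPE => THROTTLE, REGIONSERVER => 'all', THROTTLE_TYPE => WRITE, LIMIT => '1000req/sec'
   *   set_quota EXCEED_THROTTLE_QUOTA => true
   */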

  protected static boolean isExceedThrottleQuotaEnabled(final Connection connection)
    throws IOException {
    Get get = new Get(getExceedThrottleQuotaRowKey());
    get.addColumn(QUOTA_FAMILY_INFO, QUOTA_QUALIFIER_SETTINGS);
    Result result = doGet(connection, get);
    if (result.isEmpty()) {
      return false;
    }
    return Bytes.toBoolean(result.getValue(QUOTA_FAMILY_INFO, QUOTA_QUALIFIER_SETTINGS));
  }

  private static void addQuotas(final Connection connection, final byte[] rowKey, final Quotas data)
    throws IOException {
    addQuotas(connection, rowKey, QUOTA_QUALIFIER_SETTINGS, data);
  }

  private static void addQuotas(final Connection connection, final byte[] rowKey,
    final byte[] qualifier, final Quotas data) throws IOException {
    Put put = new Put(rowKey);
    put.addColumn(QUOTA_FAMILY_INFO, qualifier, quotasToData(data));
    doPut(connection, put);
  }

  private static void deleteQuotas(final Connection connection, final byte[] rowKey)
    throws IOException {
    deleteQuotas(connection, rowKey, null);
  }

  private static void deleteQuotas(final Connection connection, final byte[] rowKey,
    final byte[] qualifier) throws IOException {
    Delete delete = new Delete(rowKey);
    if (qualifier != null) {
      delete.addColumns(QUOTA_FAMILY_INFO, qualifier);
    }
    if (isNamespaceRowKey(rowKey)) {
      String ns = getNamespaceFromRowKey(rowKey);
      Quotas namespaceQuota = getNamespaceQuota(connection, ns);
      if (namespaceQuota != null && namespaceQuota.hasSpace()) {
        // When deleting a namespace space quota, also delete the table usage (u:p) snapshots
        deleteTableUsageSnapshotsForNamespace(connection, ns);
      }
    }
    doDelete(connection, delete);
  }

  public static Map<String, UserQuotaState> fetchUserQuotas(final Connection connection,
    final List<Get> gets, Map<TableName, Double> tableMachineQuotaFactors, double factor)
    throws IOException {
    long nowTs = EnvironmentEdgeManager.currentTime();
    Result[] results = doGet(connection, gets);

    Map<String, UserQuotaState> userQuotas = new HashMap<>(results.length);
    for (int i = 0; i < results.length; ++i) {
      byte[] key = gets.get(i).getRow();
      assert isUserRowKey(key);
      String user = getUserFromRowKey(key);

      if (results[i].isEmpty()) {
        userQuotas.put(user, buildDefaultUserQuotaState(connection.getConfiguration(), nowTs));
        continue;
      }

      final UserQuotaState quotaInfo = new UserQuotaState(nowTs);
      userQuotas.put(user, quotaInfo);

      assert Bytes.equals(key, results[i].getRow());

      try {
        parseUserResult(user, results[i], new UserQuotasVisitor() {
          @Override
          public void visitUserQuotas(String userName, String namespace, Quotas quotas) {
            quotas = updateClusterQuotaToMachineQuota(quotas, factor);
            quotaInfo.setQuotas(namespace, quotas);
          }

          @Override
          public void visitUserQuotas(String userName, TableName table, Quotas quotas) {
            quotas = updateClusterQuotaToMachineQuota(quotas,
              tableMachineQuotaFactors.containsKey(table)
                ? tableMachineQuotaFactors.get(table)
                : 1);
            quotaInfo.setQuotas(table, quotas);
          }

          @Override
          public void visitUserQuotas(String userName, Quotas quotas) {
            quotas = updateClusterQuotaToMachineQuota(quotas, factor);
            quotaInfo.setQuotas(quotas);
          }
        });
      } catch (IOException e) {
        LOG.error("Unable to parse user '" + user + "' quotas", e);
        userQuotas.remove(user);
      }
    }
    return userQuotas;
  }

  /**
   * Builds the default user quota state from the {@code hbase.quota.default.user.machine.*}
   * properties, for users that have no persisted quota.
   */
  protected static UserQuotaState buildDefaultUserQuotaState(Configuration conf, long nowTs) {
    QuotaProtos.Throttle.Builder throttleBuilder = QuotaProtos.Throttle.newBuilder();

    buildDefaultTimedQuota(conf, QUOTA_DEFAULT_USER_MACHINE_READ_NUM)
      .ifPresent(throttleBuilder::setReadNum);
    buildDefaultTimedQuota(conf, QUOTA_DEFAULT_USER_MACHINE_READ_SIZE)
      .ifPresent(throttleBuilder::setReadSize);
    buildDefaultTimedQuota(conf, QUOTA_DEFAULT_USER_MACHINE_REQUEST_NUM)
      .ifPresent(throttleBuilder::setReqNum);
    buildDefaultTimedQuota(conf, QUOTA_DEFAULT_USER_MACHINE_REQUEST_SIZE)
      .ifPresent(throttleBuilder::setReqSize);
    buildDefaultTimedQuota(conf, QUOTA_DEFAULT_USER_MACHINE_WRITE_NUM)
      .ifPresent(throttleBuilder::setWriteNum);
    buildDefaultTimedQuota(conf, QUOTA_DEFAULT_USER_MACHINE_WRITE_SIZE)
      .ifPresent(throttleBuilder::setWriteSize);

    UserQuotaState state = new UserQuotaState(nowTs);
    QuotaProtos.Quotas defaultQuotas =
      QuotaProtos.Quotas.newBuilder().setThrottle(throttleBuilder.build()).build();
    state.setQuotas(defaultQuotas);
    return state;
  }

  private static Optional<TimedQuota> buildDefaultTimedQuota(Configuration conf, String key) {
    int defaultSoftLimit = conf.getInt(key, -1);
    if (defaultSoftLimit == -1) {
      return Optional.empty();
    }
    return Optional.of(ProtobufUtil.toTimedQuota(defaultSoftLimit,
      java.util.concurrent.TimeUnit.SECONDS, org.apache.hadoop.hbase.quotas.QuotaScope.MACHINE));
  }
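
  /*
   * For example, with hbase.quota.default.user.machine.read.num set to 1000 (an illustrative
   * value), buildDefaultTimedQuota yields a TimedQuota with softLimit=1000, TimeUnit.SECONDS and
   * QuotaScope.MACHINE, i.e. an otherwise unthrottled user may issue at most 1000 reads per
   * second on each region server.
   */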

  public static Map<TableName, QuotaState> fetchTableQuotas(final Connection connection,
    final List<Get> gets, Map<TableName, Double> tableMachineFactors) throws IOException {
    return fetchGlobalQuotas("table", connection, gets, new KeyFromRow<TableName>() {
      @Override
      public TableName getKeyFromRow(final byte[] row) {
        assert isTableRowKey(row);
        return getTableFromRowKey(row);
      }

      @Override
      public double getFactor(TableName tableName) {
        return tableMachineFactors.containsKey(tableName) ? tableMachineFactors.get(tableName) : 1;
      }
    });
  }

  public static Map<String, QuotaState> fetchNamespaceQuotas(final Connection connection,
    final List<Get> gets, double factor) throws IOException {
    return fetchGlobalQuotas("namespace", connection, gets, new KeyFromRow<String>() {
      @Override
      public String getKeyFromRow(final byte[] row) {
        assert isNamespaceRowKey(row);
        return getNamespaceFromRowKey(row);
      }

      @Override
      public double getFactor(String s) {
        return factor;
      }
    });
  }

  public static Map<String, QuotaState> fetchRegionServerQuotas(final Connection connection,
    final List<Get> gets) throws IOException {
    return fetchGlobalQuotas("regionServer", connection, gets, new KeyFromRow<String>() {
      @Override
      public String getKeyFromRow(final byte[] row) {
        assert isRegionServerRowKey(row);
        return getRegionServerFromRowKey(row);
      }

      @Override
      public double getFactor(String s) {
        return 1;
      }
    });
  }

  public static <K> Map<K, QuotaState> fetchGlobalQuotas(final String type,
    final Connection connection, final List<Get> gets, final KeyFromRow<K> kfr) throws IOException {
    long nowTs = EnvironmentEdgeManager.currentTime();
    Result[] results = doGet(connection, gets);

    Map<K, QuotaState> globalQuotas = new HashMap<>(results.length);
    for (int i = 0; i < results.length; ++i) {
      byte[] row = gets.get(i).getRow();
      K key = kfr.getKeyFromRow(row);

      QuotaState quotaInfo = new QuotaState(nowTs);
      globalQuotas.put(key, quotaInfo);

      if (results[i].isEmpty()) continue;
      assert Bytes.equals(row, results[i].getRow());

      byte[] data = results[i].getValue(QUOTA_FAMILY_INFO, QUOTA_QUALIFIER_SETTINGS);
      if (data == null) continue;

      try {
        Quotas quotas = quotasFromData(data);
        quotas = updateClusterQuotaToMachineQuota(quotas, kfr.getFactor(key));
        quotaInfo.setQuotas(quotas);
      } catch (IOException e) {
        LOG.error("Unable to parse " + type + " '" + key + "' quotas", e);
        globalQuotas.remove(key);
      }
    }
    return globalQuotas;
  }

  /**
   * Convert a cluster scope quota to a machine scope quota.
   * @param quotas the original quota
   * @param factor the factor used to divide the cluster limiter into a machine limiter
   * @return the converted quota whose quota limiters are all in machine scope
   */
  private static Quotas updateClusterQuotaToMachineQuota(Quotas quotas, double factor) {
    Quotas.Builder newQuotas = Quotas.newBuilder(quotas);
    if (newQuotas.hasThrottle()) {
      Throttle.Builder throttle = Throttle.newBuilder(newQuotas.getThrottle());
      if (throttle.hasReqNum()) {
        throttle.setReqNum(updateTimedQuota(throttle.getReqNum(), factor));
      }
      if (throttle.hasReqSize()) {
        throttle.setReqSize(updateTimedQuota(throttle.getReqSize(), factor));
      }
      if (throttle.hasReadNum()) {
        throttle.setReadNum(updateTimedQuota(throttle.getReadNum(), factor));
      }
      if (throttle.hasReadSize()) {
        throttle.setReadSize(updateTimedQuota(throttle.getReadSize(), factor));
      }
      if (throttle.hasWriteNum()) {
        throttle.setWriteNum(updateTimedQuota(throttle.getWriteNum(), factor));
      }
      if (throttle.hasWriteSize()) {
        throttle.setWriteSize(updateTimedQuota(throttle.getWriteSize(), factor));
      }
      if (throttle.hasReqCapacityUnit()) {
        throttle.setReqCapacityUnit(updateTimedQuota(throttle.getReqCapacityUnit(), factor));
      }
      if (throttle.hasReadCapacityUnit()) {
        throttle.setReadCapacityUnit(updateTimedQuota(throttle.getReadCapacityUnit(), factor));
      }
      if (throttle.hasWriteCapacityUnit()) {
        throttle.setWriteCapacityUnit(updateTimedQuota(throttle.getWriteCapacityUnit(), factor));
      }
      newQuotas.setThrottle(throttle.build());
    }
    return newQuotas.build();
  }

  private static TimedQuota updateTimedQuota(TimedQuota timedQuota, double factor) {
    if (timedQuota.getScope() == QuotaScope.CLUSTER) {
      TimedQuota.Builder newTimedQuota = TimedQuota.newBuilder(timedQuota);
      newTimedQuota.setSoftLimit(Math.max(1, (long) (timedQuota.getSoftLimit() * factor)))
        .setScope(QuotaScope.MACHINE);
      return newTimedQuota.build();
    } else {
      return timedQuota;
    }
  }
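
  /*
   * For example, a CLUSTER scope TimedQuota with a soft limit of 100 requests per second and a
   * machine factor of 0.2 (say, the load is spread evenly across five region servers) is rewritten
   * as a MACHINE scope TimedQuota of Math.max(1, (long) (100 * 0.2)) = 20 requests per second on
   * each server; the numbers are illustrative.
   */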

  private static interface KeyFromRow<T> {
    T getKeyFromRow(final byte[] row);

    double getFactor(T t);
  }

  /*
   * ========================================================================= HTable helpers
   */
  private static void doPut(final Connection connection, final Put put) throws IOException {
    try (Table table = connection.getTable(QuotaUtil.QUOTA_TABLE_NAME)) {
      table.put(put);
    }
  }

  private static void doDelete(final Connection connection, final Delete delete)
    throws IOException {
    try (Table table = connection.getTable(QuotaUtil.QUOTA_TABLE_NAME)) {
      table.delete(delete);
    }
  }

  /*
   * ========================================================================= Data Size Helpers
   */
  public static long calculateMutationSize(final Mutation mutation) {
    long size = 0;
    for (Map.Entry<byte[], List<Cell>> entry : mutation.getFamilyCellMap().entrySet()) {
      for (Cell cell : entry.getValue()) {
        size += cell.getSerializedSize();
      }
    }
    return size;
  }

  public static long calculateResultSize(final Result result) {
    long size = 0;
    for (Cell cell : result.rawCells()) {
      size += cell.getSerializedSize();
    }
    return size;
  }

  public static long calculateResultSize(final List<Result> results) {
    long size = 0;
    for (Result result : results) {
      for (Cell cell : result.rawCells()) {
        size += cell.getSerializedSize();
      }
    }
    return size;
  }

  public static long calculateCellsSize(final List<Cell> cells) {
    long size = 0;
    for (Cell cell : cells) {
      size += cell.getSerializedSize();
    }
    return size;
  }

  /**
   * Method to enable a table, if not already enabled. This method suppresses
   * {@link TableNotDisabledException} and {@link TableNotFoundException}, if thrown while enabling
   * the table.
   * @param conn      connection to re-use
   * @param tableName name of the table to be enabled
   */
  public static void enableTableIfNotEnabled(Connection conn, TableName tableName)
    throws IOException {
    try {
      conn.getAdmin().enableTable(tableName);
    } catch (TableNotDisabledException | TableNotFoundException e) {
      // ignore
    }
  }

  /**
   * Method to disable a table, if not already disabled. This method suppresses
   * {@link TableNotEnabledException} and {@link TableNotFoundException}, if thrown while disabling
   * the table.
   * @param conn      connection to re-use
   * @param tableName table name which has moved into space quota violation
   */
  public static void disableTableIfNotDisabled(Connection conn, TableName tableName)
    throws IOException {
    try {
      conn.getAdmin().disableTable(tableName);
    } catch (TableNotEnabledException | TableNotFoundException e) {
      // ignore
    }
  }
}