001/* 002 * Licensed to the Apache Software Foundation (ASF) under one 003 * or more contributor license agreements. See the NOTICE file 004 * distributed with this work for additional information 005 * regarding copyright ownership. The ASF licenses this file 006 * to you under the Apache License, Version 2.0 (the 007 * "License"); you may not use this file except in compliance 008 * with the License. You may obtain a copy of the License at 009 * 010 * http://www.apache.org/licenses/LICENSE-2.0 011 * 012 * Unless required by applicable law or agreed to in writing, software 013 * distributed under the License is distributed on an "AS IS" BASIS, 014 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 015 * See the License for the specific language governing permissions and 016 * limitations under the License. 017 */ 018package org.apache.hadoop.hbase.master.assignment; 019 020import java.io.IOException; 021import java.io.InterruptedIOException; 022import java.util.ArrayList; 023import java.util.Arrays; 024import java.util.Collection; 025import java.util.Collections; 026import java.util.HashMap; 027import java.util.List; 028import java.util.Map; 029import java.util.concurrent.Callable; 030import java.util.concurrent.ExecutionException; 031import java.util.concurrent.ExecutorService; 032import java.util.concurrent.Executors; 033import java.util.concurrent.Future; 034import java.util.concurrent.TimeUnit; 035import java.util.stream.Stream; 036import org.apache.hadoop.conf.Configuration; 037import org.apache.hadoop.fs.FileSystem; 038import org.apache.hadoop.fs.Path; 039import org.apache.hadoop.hbase.DoNotRetryIOException; 040import org.apache.hadoop.hbase.HConstants; 041import org.apache.hadoop.hbase.ServerName; 042import org.apache.hadoop.hbase.TableName; 043import org.apache.hadoop.hbase.UnknownRegionException; 044import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor; 045import org.apache.hadoop.hbase.client.MasterSwitchType; 046import org.apache.hadoop.hbase.client.Mutation; 047import org.apache.hadoop.hbase.client.RegionInfo; 048import org.apache.hadoop.hbase.client.RegionInfoBuilder; 049import org.apache.hadoop.hbase.client.TableDescriptor; 050import org.apache.hadoop.hbase.io.hfile.CacheConfig; 051import org.apache.hadoop.hbase.master.MasterCoprocessorHost; 052import org.apache.hadoop.hbase.master.MasterFileSystem; 053import org.apache.hadoop.hbase.master.RegionState.State; 054import org.apache.hadoop.hbase.master.normalizer.NormalizationPlan; 055import org.apache.hadoop.hbase.master.procedure.AbstractStateMachineRegionProcedure; 056import org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv; 057import org.apache.hadoop.hbase.master.procedure.MasterProcedureUtil; 058import org.apache.hadoop.hbase.procedure2.ProcedureMetrics; 059import org.apache.hadoop.hbase.procedure2.ProcedureStateSerializer; 060import org.apache.hadoop.hbase.quotas.QuotaExceededException; 061import org.apache.hadoop.hbase.regionserver.HRegionFileSystem; 062import org.apache.hadoop.hbase.regionserver.HStore; 063import org.apache.hadoop.hbase.regionserver.HStoreFile; 064import org.apache.hadoop.hbase.regionserver.RegionSplitPolicy; 065import org.apache.hadoop.hbase.regionserver.RegionSplitRestriction; 066import org.apache.hadoop.hbase.regionserver.StoreFileInfo; 067import org.apache.hadoop.hbase.regionserver.StoreUtils; 068import org.apache.hadoop.hbase.regionserver.storefiletracker.StoreFileTracker; 069import org.apache.hadoop.hbase.regionserver.storefiletracker.StoreFileTrackerFactory; 070import org.apache.hadoop.hbase.util.Bytes; 071import org.apache.hadoop.hbase.util.CommonFSUtils; 072import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; 073import org.apache.hadoop.hbase.util.FSUtils; 074import org.apache.hadoop.hbase.util.Pair; 075import org.apache.hadoop.hbase.util.Threads; 076import org.apache.hadoop.hbase.wal.WALSplitUtil; 077import org.apache.hadoop.util.ReflectionUtils; 078import org.apache.yetus.audience.InterfaceAudience; 079import org.slf4j.Logger; 080import org.slf4j.LoggerFactory; 081 082import org.apache.hbase.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder; 083 084import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; 085import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionInfoResponse; 086import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos; 087import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.SplitTableRegionState; 088 089/** 090 * The procedure to split a region in a table. Takes lock on the parent region. It holds the lock 091 * for the life of the procedure. 092 * <p> 093 * Throws exception on construction if determines context hostile to spllt (cluster going down or 094 * master is shutting down or table is disabled). 095 * </p> 096 */ 097@InterfaceAudience.Private 098public class SplitTableRegionProcedure 099 extends AbstractStateMachineRegionProcedure<SplitTableRegionState> { 100 private static final Logger LOG = LoggerFactory.getLogger(SplitTableRegionProcedure.class); 101 private RegionInfo daughterOneRI; 102 private RegionInfo daughterTwoRI; 103 private byte[] bestSplitRow; 104 private RegionSplitPolicy splitPolicy; 105 106 public SplitTableRegionProcedure() { 107 // Required by the Procedure framework to create the procedure on replay 108 } 109 110 public SplitTableRegionProcedure(final MasterProcedureEnv env, final RegionInfo regionToSplit, 111 final byte[] splitRow) throws IOException { 112 super(env, regionToSplit); 113 preflightChecks(env, true); 114 // When procedure goes to run in its prepare step, it also does these checkOnline checks. Here 115 // we fail-fast on construction. There it skips the split with just a warning. 116 checkOnline(env, regionToSplit); 117 this.bestSplitRow = splitRow; 118 TableDescriptor tableDescriptor = 119 env.getMasterServices().getTableDescriptors().get(getTableName()); 120 Configuration conf = env.getMasterConfiguration(); 121 if (hasBestSplitRow()) { 122 // Apply the split restriction for the table to the user-specified split point 123 RegionSplitRestriction splitRestriction = 124 RegionSplitRestriction.create(tableDescriptor, conf); 125 byte[] restrictedSplitRow = splitRestriction.getRestrictedSplitPoint(bestSplitRow); 126 if (!Bytes.equals(bestSplitRow, restrictedSplitRow)) { 127 LOG.warn( 128 "The specified split point {} violates the split restriction of the table. " 129 + "Using {} as a split point.", 130 Bytes.toStringBinary(bestSplitRow), Bytes.toStringBinary(restrictedSplitRow)); 131 bestSplitRow = restrictedSplitRow; 132 } 133 } 134 checkSplittable(env, regionToSplit); 135 final TableName table = regionToSplit.getTable(); 136 final long rid = getDaughterRegionIdTimestamp(regionToSplit); 137 this.daughterOneRI = 138 RegionInfoBuilder.newBuilder(table).setStartKey(regionToSplit.getStartKey()) 139 .setEndKey(bestSplitRow).setSplit(false).setRegionId(rid).build(); 140 this.daughterTwoRI = RegionInfoBuilder.newBuilder(table).setStartKey(bestSplitRow) 141 .setEndKey(regionToSplit.getEndKey()).setSplit(false).setRegionId(rid).build(); 142 143 if (tableDescriptor.getRegionSplitPolicyClassName() != null) { 144 // Since we don't have region reference here, creating the split policy instance without it. 145 // This can be used to invoke methods which don't require Region reference. This instantiation 146 // of a class on Master-side though it only makes sense on the RegionServer-side is 147 // for Phoenix Local Indexing. Refer HBASE-12583 for more information. 148 Class<? extends RegionSplitPolicy> clazz = 149 RegionSplitPolicy.getSplitPolicyClass(tableDescriptor, conf); 150 this.splitPolicy = ReflectionUtils.newInstance(clazz, conf); 151 } 152 } 153 154 @Override 155 protected LockState acquireLock(final MasterProcedureEnv env) { 156 if ( 157 env.getProcedureScheduler().waitRegions(this, getTableName(), getParentRegion(), 158 daughterOneRI, daughterTwoRI) 159 ) { 160 try { 161 LOG.debug(LockState.LOCK_EVENT_WAIT + " " + env.getProcedureScheduler().dumpLocks()); 162 } catch (IOException e) { 163 // Ignore, just for logging 164 } 165 return LockState.LOCK_EVENT_WAIT; 166 } 167 return LockState.LOCK_ACQUIRED; 168 } 169 170 @Override 171 protected void releaseLock(final MasterProcedureEnv env) { 172 env.getProcedureScheduler().wakeRegions(this, getTableName(), getParentRegion(), daughterOneRI, 173 daughterTwoRI); 174 } 175 176 public RegionInfo getDaughterOneRI() { 177 return daughterOneRI; 178 } 179 180 public RegionInfo getDaughterTwoRI() { 181 return daughterTwoRI; 182 } 183 184 private boolean hasBestSplitRow() { 185 return bestSplitRow != null && bestSplitRow.length > 0; 186 } 187 188 /** 189 * Check whether the region is splittable 190 * @param env MasterProcedureEnv 191 * @param regionToSplit parent Region to be split 192 */ 193 private void checkSplittable(final MasterProcedureEnv env, final RegionInfo regionToSplit) 194 throws IOException { 195 // Ask the remote RS if this region is splittable. 196 // If we get an IOE, report it along w/ the failure so can see why we are not splittable at 197 // this time. 198 if (regionToSplit.getReplicaId() != RegionInfo.DEFAULT_REPLICA_ID) { 199 throw new IllegalArgumentException("Can't invoke split on non-default regions directly"); 200 } 201 RegionStateNode node = 202 env.getAssignmentManager().getRegionStates().getRegionStateNode(getParentRegion()); 203 IOException splittableCheckIOE = null; 204 boolean splittable = false; 205 if (node != null) { 206 try { 207 GetRegionInfoResponse response; 208 if (!hasBestSplitRow()) { 209 LOG.info( 210 "{} splitKey isn't explicitly specified, will try to find a best split key from RS {}", 211 node.getRegionInfo().getRegionNameAsString(), node.getRegionLocation()); 212 response = AssignmentManagerUtil.getRegionInfoResponse(env, node.getRegionLocation(), 213 node.getRegionInfo(), true); 214 bestSplitRow = 215 response.hasBestSplitRow() ? response.getBestSplitRow().toByteArray() : null; 216 } else { 217 response = AssignmentManagerUtil.getRegionInfoResponse(env, node.getRegionLocation(), 218 node.getRegionInfo(), false); 219 } 220 splittable = response.hasSplittable() && response.getSplittable(); 221 if (LOG.isDebugEnabled()) { 222 LOG.debug("Splittable=" + splittable + " " + node.toShortString()); 223 } 224 } catch (IOException e) { 225 splittableCheckIOE = e; 226 } 227 } 228 229 if (!splittable) { 230 IOException e = 231 new DoNotRetryIOException(regionToSplit.getShortNameToLog() + " NOT splittable"); 232 if (splittableCheckIOE != null) { 233 e.initCause(splittableCheckIOE); 234 } 235 throw e; 236 } 237 238 if (!hasBestSplitRow()) { 239 throw new DoNotRetryIOException("Region not splittable because bestSplitPoint = null, " 240 + "maybe table is too small for auto split. For force split, try specifying split row"); 241 } 242 243 if (Bytes.equals(regionToSplit.getStartKey(), bestSplitRow)) { 244 throw new DoNotRetryIOException( 245 "Split row is equal to startkey: " + Bytes.toStringBinary(bestSplitRow)); 246 } 247 248 if (!regionToSplit.containsRow(bestSplitRow)) { 249 throw new DoNotRetryIOException("Split row is not inside region key range splitKey:" 250 + Bytes.toStringBinary(bestSplitRow) + " region: " + regionToSplit); 251 } 252 } 253 254 /** 255 * Calculate daughter regionid to use. 256 * @param hri Parent {@link RegionInfo} 257 * @return Daughter region id (timestamp) to use. 258 */ 259 private static long getDaughterRegionIdTimestamp(final RegionInfo hri) { 260 long rid = EnvironmentEdgeManager.currentTime(); 261 // Regionid is timestamp. Can't be less than that of parent else will insert 262 // at wrong location in hbase:meta (See HBASE-710). 263 if (rid < hri.getRegionId()) { 264 LOG.warn("Clock skew; parent regions id is " + hri.getRegionId() 265 + " but current time here is " + rid); 266 rid = hri.getRegionId() + 1; 267 } 268 return rid; 269 } 270 271 private void removeNonDefaultReplicas(MasterProcedureEnv env) throws IOException { 272 AssignmentManagerUtil.removeNonDefaultReplicas(env, Stream.of(getParentRegion()), 273 getRegionReplication(env)); 274 } 275 276 private void checkClosedRegions(MasterProcedureEnv env) throws IOException { 277 // theoretically this should not happen any more after we use TRSP, but anyway let's add a check 278 // here 279 AssignmentManagerUtil.checkClosedRegion(env, getParentRegion()); 280 } 281 282 @Override 283 protected Flow executeFromState(MasterProcedureEnv env, SplitTableRegionState state) 284 throws InterruptedException { 285 LOG.trace("{} execute state={}", this, state); 286 287 try { 288 switch (state) { 289 case SPLIT_TABLE_REGION_PREPARE: 290 if (prepareSplitRegion(env)) { 291 setNextState(SplitTableRegionState.SPLIT_TABLE_REGION_PRE_OPERATION); 292 break; 293 } else { 294 return Flow.NO_MORE_STATE; 295 } 296 case SPLIT_TABLE_REGION_PRE_OPERATION: 297 preSplitRegion(env); 298 setNextState(SplitTableRegionState.SPLIT_TABLE_REGION_CLOSE_PARENT_REGION); 299 break; 300 case SPLIT_TABLE_REGION_CLOSE_PARENT_REGION: 301 addChildProcedure(createUnassignProcedures(env)); 302 // createUnassignProcedures() can throw out IOException. If this happens, 303 // it wont reach state SPLIT_TABLE_REGIONS_CHECK_CLOSED_REGION and no parent regions 304 // is closed as all created UnassignProcedures are rolled back. If it rolls back with 305 // state SPLIT_TABLE_REGION_CLOSE_PARENT_REGION, no need to call openParentRegion(), 306 // otherwise, it will result in OpenRegionProcedure for an already open region. 307 setNextState(SplitTableRegionState.SPLIT_TABLE_REGIONS_CHECK_CLOSED_REGIONS); 308 break; 309 case SPLIT_TABLE_REGIONS_CHECK_CLOSED_REGIONS: 310 checkClosedRegions(env); 311 setNextState(SplitTableRegionState.SPLIT_TABLE_REGION_CREATE_DAUGHTER_REGIONS); 312 break; 313 case SPLIT_TABLE_REGION_CREATE_DAUGHTER_REGIONS: 314 removeNonDefaultReplicas(env); 315 createDaughterRegions(env); 316 setNextState(SplitTableRegionState.SPLIT_TABLE_REGION_WRITE_MAX_SEQUENCE_ID_FILE); 317 break; 318 case SPLIT_TABLE_REGION_WRITE_MAX_SEQUENCE_ID_FILE: 319 writeMaxSequenceIdFile(env); 320 setNextState(SplitTableRegionState.SPLIT_TABLE_REGION_PRE_OPERATION_BEFORE_META); 321 break; 322 case SPLIT_TABLE_REGION_PRE_OPERATION_BEFORE_META: 323 preSplitRegionBeforeMETA(env); 324 setNextState(SplitTableRegionState.SPLIT_TABLE_REGION_UPDATE_META); 325 break; 326 case SPLIT_TABLE_REGION_UPDATE_META: 327 updateMeta(env); 328 setNextState(SplitTableRegionState.SPLIT_TABLE_REGION_PRE_OPERATION_AFTER_META); 329 break; 330 case SPLIT_TABLE_REGION_PRE_OPERATION_AFTER_META: 331 preSplitRegionAfterMETA(env); 332 setNextState(SplitTableRegionState.SPLIT_TABLE_REGION_OPEN_CHILD_REGIONS); 333 break; 334 case SPLIT_TABLE_REGION_OPEN_CHILD_REGIONS: 335 addChildProcedure(createAssignProcedures(env)); 336 setNextState(SplitTableRegionState.SPLIT_TABLE_REGION_POST_OPERATION); 337 break; 338 case SPLIT_TABLE_REGION_POST_OPERATION: 339 postSplitRegion(env); 340 return Flow.NO_MORE_STATE; 341 default: 342 throw new UnsupportedOperationException(this + " unhandled state=" + state); 343 } 344 } catch (IOException e) { 345 String msg = "Splitting " + getParentRegion().getEncodedName() + ", " + this; 346 if (!isRollbackSupported(state)) { 347 // We reach a state that cannot be rolled back. We just need to keep retrying. 348 LOG.warn(msg, e); 349 } else { 350 LOG.error(msg, e); 351 setFailure("master-split-regions", e); 352 } 353 } 354 // if split fails, need to call ((HRegion)parent).clearSplit() when it is a force split 355 return Flow.HAS_MORE_STATE; 356 } 357 358 /** 359 * To rollback {@link SplitTableRegionProcedure}, an AssignProcedure is asynchronously submitted 360 * for parent region to be split (rollback doesn't wait on the completion of the AssignProcedure) 361 * . This can be improved by changing rollback() to support sub-procedures. See HBASE-19851 for 362 * details. 363 */ 364 @Override 365 protected void rollbackState(final MasterProcedureEnv env, final SplitTableRegionState state) 366 throws IOException, InterruptedException { 367 LOG.trace("{} rollback state={}", this, state); 368 369 try { 370 switch (state) { 371 case SPLIT_TABLE_REGION_POST_OPERATION: 372 case SPLIT_TABLE_REGION_OPEN_CHILD_REGIONS: 373 case SPLIT_TABLE_REGION_PRE_OPERATION_AFTER_META: 374 case SPLIT_TABLE_REGION_UPDATE_META: 375 // PONR 376 throw new UnsupportedOperationException(this + " unhandled state=" + state); 377 case SPLIT_TABLE_REGION_PRE_OPERATION_BEFORE_META: 378 break; 379 case SPLIT_TABLE_REGION_CREATE_DAUGHTER_REGIONS: 380 case SPLIT_TABLE_REGION_WRITE_MAX_SEQUENCE_ID_FILE: 381 deleteDaughterRegions(env); 382 break; 383 case SPLIT_TABLE_REGIONS_CHECK_CLOSED_REGIONS: 384 openParentRegion(env); 385 break; 386 case SPLIT_TABLE_REGION_CLOSE_PARENT_REGION: 387 // If it rolls back with state SPLIT_TABLE_REGION_CLOSE_PARENT_REGION, no need to call 388 // openParentRegion(), otherwise, it will result in OpenRegionProcedure for an 389 // already open region. 390 break; 391 case SPLIT_TABLE_REGION_PRE_OPERATION: 392 postRollBackSplitRegion(env); 393 break; 394 case SPLIT_TABLE_REGION_PREPARE: 395 break; // nothing to do 396 default: 397 throw new UnsupportedOperationException(this + " unhandled state=" + state); 398 } 399 } catch (IOException e) { 400 // This will be retried. Unless there is a bug in the code, 401 // this should be just a "temporary error" (e.g. network down) 402 LOG.warn("pid=" + getProcId() + " failed rollback attempt step " + state 403 + " for splitting the region " + getParentRegion().getEncodedName() + " in table " 404 + getTableName(), e); 405 throw e; 406 } 407 } 408 409 /* 410 * Check whether we are in the state that can be rollback 411 */ 412 @Override 413 protected boolean isRollbackSupported(final SplitTableRegionState state) { 414 switch (state) { 415 case SPLIT_TABLE_REGION_POST_OPERATION: 416 case SPLIT_TABLE_REGION_OPEN_CHILD_REGIONS: 417 case SPLIT_TABLE_REGION_PRE_OPERATION_AFTER_META: 418 case SPLIT_TABLE_REGION_UPDATE_META: 419 // It is not safe to rollback if we reach to these states. 420 return false; 421 default: 422 break; 423 } 424 return true; 425 } 426 427 @Override 428 protected SplitTableRegionState getState(final int stateId) { 429 return SplitTableRegionState.forNumber(stateId); 430 } 431 432 @Override 433 protected int getStateId(final SplitTableRegionState state) { 434 return state.getNumber(); 435 } 436 437 @Override 438 protected SplitTableRegionState getInitialState() { 439 return SplitTableRegionState.SPLIT_TABLE_REGION_PREPARE; 440 } 441 442 @Override 443 protected void serializeStateData(ProcedureStateSerializer serializer) throws IOException { 444 super.serializeStateData(serializer); 445 446 final MasterProcedureProtos.SplitTableRegionStateData.Builder splitTableRegionMsg = 447 MasterProcedureProtos.SplitTableRegionStateData.newBuilder() 448 .setUserInfo(MasterProcedureUtil.toProtoUserInfo(getUser())) 449 .setParentRegionInfo(ProtobufUtil.toRegionInfo(getRegion())) 450 .addChildRegionInfo(ProtobufUtil.toRegionInfo(daughterOneRI)) 451 .addChildRegionInfo(ProtobufUtil.toRegionInfo(daughterTwoRI)); 452 serializer.serialize(splitTableRegionMsg.build()); 453 } 454 455 @Override 456 protected void deserializeStateData(ProcedureStateSerializer serializer) throws IOException { 457 super.deserializeStateData(serializer); 458 459 final MasterProcedureProtos.SplitTableRegionStateData splitTableRegionsMsg = 460 serializer.deserialize(MasterProcedureProtos.SplitTableRegionStateData.class); 461 setUser(MasterProcedureUtil.toUserInfo(splitTableRegionsMsg.getUserInfo())); 462 setRegion(ProtobufUtil.toRegionInfo(splitTableRegionsMsg.getParentRegionInfo())); 463 assert (splitTableRegionsMsg.getChildRegionInfoCount() == 2); 464 daughterOneRI = ProtobufUtil.toRegionInfo(splitTableRegionsMsg.getChildRegionInfo(0)); 465 daughterTwoRI = ProtobufUtil.toRegionInfo(splitTableRegionsMsg.getChildRegionInfo(1)); 466 } 467 468 @Override 469 public void toStringClassDetails(StringBuilder sb) { 470 sb.append(getClass().getSimpleName()); 471 sb.append(" table="); 472 sb.append(getTableName()); 473 sb.append(", parent="); 474 sb.append(getParentRegion().getShortNameToLog()); 475 sb.append(", daughterA="); 476 sb.append(daughterOneRI.getShortNameToLog()); 477 sb.append(", daughterB="); 478 sb.append(daughterTwoRI.getShortNameToLog()); 479 } 480 481 private RegionInfo getParentRegion() { 482 return getRegion(); 483 } 484 485 @Override 486 public TableOperationType getTableOperationType() { 487 return TableOperationType.REGION_SPLIT; 488 } 489 490 @Override 491 protected ProcedureMetrics getProcedureMetrics(MasterProcedureEnv env) { 492 return env.getAssignmentManager().getAssignmentManagerMetrics().getSplitProcMetrics(); 493 } 494 495 private byte[] getSplitRow() { 496 return daughterTwoRI.getStartKey(); 497 } 498 499 private static final State[] EXPECTED_SPLIT_STATES = new State[] { State.OPEN, State.CLOSED }; 500 501 /** 502 * Prepare to Split region. 503 * @param env MasterProcedureEnv 504 */ 505 public boolean prepareSplitRegion(final MasterProcedureEnv env) throws IOException { 506 // Fail if we are taking snapshot for the given table 507 if ( 508 env.getMasterServices().getSnapshotManager().isTakingSnapshot(getParentRegion().getTable()) 509 ) { 510 setFailure(new IOException("Skip splitting region " + getParentRegion().getShortNameToLog() 511 + ", because we are taking snapshot for the table " + getParentRegion().getTable())); 512 return false; 513 } 514 // Check whether the region is splittable 515 RegionStateNode node = 516 env.getAssignmentManager().getRegionStates().getRegionStateNode(getParentRegion()); 517 518 if (node == null) { 519 throw new UnknownRegionException(getParentRegion().getRegionNameAsString()); 520 } 521 522 RegionInfo parentHRI = node.getRegionInfo(); 523 if (parentHRI == null) { 524 LOG.info("Unsplittable; parent region is null; node={}", node); 525 return false; 526 } 527 // Lookup the parent HRI state from the AM, which has the latest updated info. 528 // Protect against the case where concurrent SPLIT requests came in and succeeded 529 // just before us. 530 if (node.isInState(State.SPLIT)) { 531 LOG.info("Split of " + parentHRI + " skipped; state is already SPLIT"); 532 return false; 533 } 534 if (parentHRI.isSplit() || parentHRI.isOffline()) { 535 LOG.info("Split of " + parentHRI + " skipped because offline/split."); 536 return false; 537 } 538 539 // expected parent to be online or closed 540 if (!node.isInState(EXPECTED_SPLIT_STATES)) { 541 // We may have SPLIT already? 542 setFailure( 543 new IOException("Split " + parentHRI.getRegionNameAsString() + " FAILED because state=" 544 + node.getState() + "; expected " + Arrays.toString(EXPECTED_SPLIT_STATES))); 545 return false; 546 } 547 548 // Mostly this check is not used because we already check the switch before submit a split 549 // procedure. Just for safe, check the switch again. This procedure can be rollbacked if 550 // the switch was set to false after submit. 551 if (!env.getMasterServices().isSplitOrMergeEnabled(MasterSwitchType.SPLIT)) { 552 LOG.warn("pid=" + getProcId() + " split switch is off! skip split of " + parentHRI); 553 setFailure(new IOException( 554 "Split region " + parentHRI.getRegionNameAsString() + " failed due to split switch off")); 555 return false; 556 } 557 558 if (!env.getMasterServices().getTableDescriptors().get(getTableName()).isSplitEnabled()) { 559 LOG.warn("pid={}, split is disabled for the table! Skipping split of {}", getProcId(), 560 parentHRI); 561 setFailure(new IOException("Split region " + parentHRI.getRegionNameAsString() 562 + " failed as region split is disabled for the table")); 563 return false; 564 } 565 566 // set node state as SPLITTING 567 node.setState(State.SPLITTING); 568 569 // Since we have the lock and the master is coordinating the operation 570 // we are always able to split the region 571 return true; 572 } 573 574 /** 575 * Action before splitting region in a table. 576 * @param env MasterProcedureEnv 577 */ 578 private void preSplitRegion(final MasterProcedureEnv env) 579 throws IOException, InterruptedException { 580 final MasterCoprocessorHost cpHost = env.getMasterCoprocessorHost(); 581 if (cpHost != null) { 582 cpHost.preSplitRegionAction(getTableName(), getSplitRow(), getUser()); 583 } 584 585 // TODO: Clean up split and merge. Currently all over the place. 586 // Notify QuotaManager and RegionNormalizer 587 try { 588 env.getMasterServices().getMasterQuotaManager().onRegionSplit(this.getParentRegion()); 589 } catch (QuotaExceededException e) { 590 // TODO: why is this here? split requests can be submitted by actors other than the normalizer 591 env.getMasterServices().getRegionNormalizerManager() 592 .planSkipped(NormalizationPlan.PlanType.SPLIT); 593 throw e; 594 } 595 } 596 597 /** 598 * Action after rollback a split table region action. 599 * @param env MasterProcedureEnv 600 */ 601 private void postRollBackSplitRegion(final MasterProcedureEnv env) throws IOException { 602 final MasterCoprocessorHost cpHost = env.getMasterCoprocessorHost(); 603 if (cpHost != null) { 604 cpHost.postRollBackSplitRegionAction(getUser()); 605 } 606 } 607 608 /** 609 * Rollback close parent region 610 */ 611 private void openParentRegion(MasterProcedureEnv env) throws IOException { 612 AssignmentManagerUtil.reopenRegionsForRollback(env, 613 Collections.singletonList((getParentRegion())), getRegionReplication(env), 614 getParentRegionServerName(env)); 615 } 616 617 /** 618 * Create daughter regions 619 */ 620 public void createDaughterRegions(final MasterProcedureEnv env) throws IOException { 621 final MasterFileSystem mfs = env.getMasterServices().getMasterFileSystem(); 622 final Path tabledir = CommonFSUtils.getTableDir(mfs.getRootDir(), getTableName()); 623 final FileSystem fs = mfs.getFileSystem(); 624 HRegionFileSystem regionFs = HRegionFileSystem.openRegionFromFileSystem( 625 env.getMasterConfiguration(), fs, tabledir, getParentRegion(), false); 626 regionFs.createSplitsDir(daughterOneRI, daughterTwoRI); 627 628 Pair<List<Path>, List<Path>> expectedReferences = splitStoreFiles(env, regionFs); 629 630 assertSplitResultFilesCount(fs, expectedReferences.getFirst().size(), 631 regionFs.getSplitsDir(daughterOneRI)); 632 regionFs.commitDaughterRegion(daughterOneRI, expectedReferences.getFirst(), env); 633 assertSplitResultFilesCount(fs, expectedReferences.getFirst().size(), 634 new Path(tabledir, daughterOneRI.getEncodedName())); 635 636 assertSplitResultFilesCount(fs, expectedReferences.getSecond().size(), 637 regionFs.getSplitsDir(daughterTwoRI)); 638 regionFs.commitDaughterRegion(daughterTwoRI, expectedReferences.getSecond(), env); 639 assertSplitResultFilesCount(fs, expectedReferences.getSecond().size(), 640 new Path(tabledir, daughterTwoRI.getEncodedName())); 641 } 642 643 private void deleteDaughterRegions(final MasterProcedureEnv env) throws IOException { 644 final MasterFileSystem mfs = env.getMasterServices().getMasterFileSystem(); 645 final Path tabledir = CommonFSUtils.getTableDir(mfs.getRootDir(), getTableName()); 646 HRegionFileSystem.deleteRegionFromFileSystem(env.getMasterConfiguration(), mfs.getFileSystem(), 647 tabledir, daughterOneRI); 648 HRegionFileSystem.deleteRegionFromFileSystem(env.getMasterConfiguration(), mfs.getFileSystem(), 649 tabledir, daughterTwoRI); 650 } 651 652 /** 653 * Create Split directory 654 * @param env MasterProcedureEnv 655 */ 656 private Pair<List<Path>, List<Path>> splitStoreFiles(final MasterProcedureEnv env, 657 final HRegionFileSystem regionFs) throws IOException { 658 final Configuration conf = env.getMasterConfiguration(); 659 TableDescriptor htd = env.getMasterServices().getTableDescriptors().get(getTableName()); 660 // The following code sets up a thread pool executor with as many slots as 661 // there's files to split. It then fires up everything, waits for 662 // completion and finally checks for any exception 663 // 664 // Note: From HBASE-26187, splitStoreFiles now creates daughter region dirs straight under the 665 // table dir. In case of failure, the proc would go through this again, already existing 666 // region dirs and split files would just be ignored, new split files should get created. 667 int nbFiles = 0; 668 final Map<String, Collection<StoreFileInfo>> files = 669 new HashMap<String, Collection<StoreFileInfo>>(htd.getColumnFamilyCount()); 670 for (ColumnFamilyDescriptor cfd : htd.getColumnFamilies()) { 671 String family = cfd.getNameAsString(); 672 StoreFileTracker tracker = 673 StoreFileTrackerFactory.create(env.getMasterConfiguration(), htd, cfd, regionFs); 674 Collection<StoreFileInfo> sfis = tracker.load(); 675 if (sfis == null) { 676 continue; 677 } 678 Collection<StoreFileInfo> filteredSfis = null; 679 for (StoreFileInfo sfi : sfis) { 680 // Filter. There is a lag cleaning up compacted reference files. They get cleared 681 // after a delay in case outstanding Scanners still have references. Because of this, 682 // the listing of the Store content may have straggler reference files. Skip these. 683 // It should be safe to skip references at this point because we checked above with 684 // the region if it thinks it is splittable and if we are here, it thinks it is 685 // splitable. 686 if (sfi.isReference()) { 687 LOG.info("Skipping split of " + sfi + "; presuming ready for archiving."); 688 continue; 689 } 690 if (filteredSfis == null) { 691 filteredSfis = new ArrayList<StoreFileInfo>(sfis.size()); 692 files.put(family, filteredSfis); 693 } 694 filteredSfis.add(sfi); 695 nbFiles++; 696 } 697 } 698 if (nbFiles == 0) { 699 // no file needs to be splitted. 700 return new Pair<>(Collections.emptyList(), Collections.emptyList()); 701 } 702 // Max #threads is the smaller of the number of storefiles or the default max determined above. 703 int maxThreads = Math.min( 704 conf.getInt(HConstants.REGION_SPLIT_THREADS_MAX, 705 conf.getInt(HStore.BLOCKING_STOREFILES_KEY, HStore.DEFAULT_BLOCKING_STOREFILE_COUNT)), 706 nbFiles); 707 LOG.info("pid=" + getProcId() + " splitting " + nbFiles + " storefiles, region=" 708 + getParentRegion().getShortNameToLog() + ", threads=" + maxThreads); 709 final ExecutorService threadPool = Executors.newFixedThreadPool(maxThreads, 710 new ThreadFactoryBuilder().setNameFormat("StoreFileSplitter-pool-%d").setDaemon(true) 711 .setUncaughtExceptionHandler(Threads.LOGGING_EXCEPTION_HANDLER).build()); 712 final List<Future<Pair<Path, Path>>> futures = new ArrayList<Future<Pair<Path, Path>>>(nbFiles); 713 714 // Split each store file. 715 for (Map.Entry<String, Collection<StoreFileInfo>> e : files.entrySet()) { 716 byte[] familyName = Bytes.toBytes(e.getKey()); 717 final ColumnFamilyDescriptor hcd = htd.getColumnFamily(familyName); 718 final Collection<StoreFileInfo> storeFiles = e.getValue(); 719 if (storeFiles != null && storeFiles.size() > 0) { 720 final Configuration storeConfiguration = 721 StoreUtils.createStoreConfiguration(env.getMasterConfiguration(), htd, hcd); 722 for (StoreFileInfo storeFileInfo : storeFiles) { 723 // As this procedure is running on master, use CacheConfig.DISABLED means 724 // don't cache any block. 725 // We also need to pass through a suitable CompoundConfiguration as if this 726 // is running in a regionserver's Store context, or we might not be able 727 // to read the hfiles. 728 storeFileInfo.setConf(storeConfiguration); 729 StoreFileSplitter sfs = new StoreFileSplitter(regionFs, familyName, 730 new HStoreFile(storeFileInfo, hcd.getBloomFilterType(), CacheConfig.DISABLED)); 731 futures.add(threadPool.submit(sfs)); 732 } 733 } 734 } 735 // Shutdown the pool 736 threadPool.shutdown(); 737 738 // Wait for all the tasks to finish. 739 // When splits ran on the RegionServer, how-long-to-wait-configuration was named 740 // hbase.regionserver.fileSplitTimeout. If set, use its value. 741 long fileSplitTimeout = conf.getLong("hbase.master.fileSplitTimeout", 742 conf.getLong("hbase.regionserver.fileSplitTimeout", 600000)); 743 try { 744 boolean stillRunning = !threadPool.awaitTermination(fileSplitTimeout, TimeUnit.MILLISECONDS); 745 if (stillRunning) { 746 threadPool.shutdownNow(); 747 // wait for the thread to shutdown completely. 748 while (!threadPool.isTerminated()) { 749 Thread.sleep(50); 750 } 751 throw new IOException( 752 "Took too long to split the" + " files and create the references, aborting split"); 753 } 754 } catch (InterruptedException e) { 755 throw (InterruptedIOException) new InterruptedIOException().initCause(e); 756 } 757 758 List<Path> daughterA = new ArrayList<>(); 759 List<Path> daughterB = new ArrayList<>(); 760 // Look for any exception 761 for (Future<Pair<Path, Path>> future : futures) { 762 try { 763 Pair<Path, Path> p = future.get(); 764 if (p.getFirst() != null) { 765 daughterA.add(p.getFirst()); 766 } 767 if (p.getSecond() != null) { 768 daughterB.add(p.getSecond()); 769 } 770 } catch (InterruptedException e) { 771 throw (InterruptedIOException) new InterruptedIOException().initCause(e); 772 } catch (ExecutionException e) { 773 throw new IOException(e); 774 } 775 } 776 777 if (LOG.isDebugEnabled()) { 778 LOG.debug("pid=" + getProcId() + " split storefiles for region " 779 + getParentRegion().getShortNameToLog() + " Daughter A: " + daughterA 780 + " storefiles, Daughter B: " + daughterB + " storefiles."); 781 } 782 return new Pair<>(daughterA, daughterB); 783 } 784 785 private void assertSplitResultFilesCount(final FileSystem fs, 786 final int expectedSplitResultFileCount, Path dir) throws IOException { 787 if (expectedSplitResultFileCount != 0) { 788 int resultFileCount = FSUtils.getRegionReferenceAndLinkFileCount(fs, dir); 789 if (expectedSplitResultFileCount != resultFileCount) { 790 throw new IOException("Failing split. Didn't have expected reference and HFileLink files" 791 + ", expected=" + expectedSplitResultFileCount + ", actual=" + resultFileCount); 792 } 793 } 794 } 795 796 private Pair<Path, Path> splitStoreFile(HRegionFileSystem regionFs, byte[] family, HStoreFile sf) 797 throws IOException { 798 if (LOG.isDebugEnabled()) { 799 LOG.debug("pid=" + getProcId() + " splitting started for store file: " + sf.getPath() 800 + " for region: " + getParentRegion().getShortNameToLog()); 801 } 802 803 final byte[] splitRow = getSplitRow(); 804 final String familyName = Bytes.toString(family); 805 final Path path_first = 806 regionFs.splitStoreFile(this.daughterOneRI, familyName, sf, splitRow, false, splitPolicy); 807 final Path path_second = 808 regionFs.splitStoreFile(this.daughterTwoRI, familyName, sf, splitRow, true, splitPolicy); 809 if (LOG.isDebugEnabled()) { 810 LOG.debug("pid=" + getProcId() + " splitting complete for store file: " + sf.getPath() 811 + " for region: " + getParentRegion().getShortNameToLog()); 812 } 813 return new Pair<Path, Path>(path_first, path_second); 814 } 815 816 /** 817 * Utility class used to do the file splitting / reference writing in parallel instead of 818 * sequentially. 819 */ 820 private class StoreFileSplitter implements Callable<Pair<Path, Path>> { 821 private final HRegionFileSystem regionFs; 822 private final byte[] family; 823 private final HStoreFile sf; 824 825 /** 826 * Constructor that takes what it needs to split 827 * @param regionFs the file system 828 * @param family Family that contains the store file 829 * @param sf which file 830 */ 831 public StoreFileSplitter(HRegionFileSystem regionFs, byte[] family, HStoreFile sf) { 832 this.regionFs = regionFs; 833 this.sf = sf; 834 this.family = family; 835 } 836 837 @Override 838 public Pair<Path, Path> call() throws IOException { 839 return splitStoreFile(regionFs, family, sf); 840 } 841 } 842 843 /** 844 * Post split region actions before the Point-of-No-Return step 845 * @param env MasterProcedureEnv 846 **/ 847 private void preSplitRegionBeforeMETA(final MasterProcedureEnv env) 848 throws IOException, InterruptedException { 849 final List<Mutation> metaEntries = new ArrayList<Mutation>(); 850 final MasterCoprocessorHost cpHost = env.getMasterCoprocessorHost(); 851 if (cpHost != null) { 852 cpHost.preSplitBeforeMETAAction(getSplitRow(), metaEntries, getUser()); 853 try { 854 for (Mutation p : metaEntries) { 855 RegionInfo.parseRegionName(p.getRow()); 856 } 857 } catch (IOException e) { 858 LOG.error("pid=" + getProcId() + " row key of mutation from coprocessor not parsable as " 859 + "region name." + "Mutations from coprocessor should only for hbase:meta table."); 860 throw e; 861 } 862 } 863 } 864 865 /** 866 * Add daughter regions to META 867 * @param env MasterProcedureEnv 868 */ 869 private void updateMeta(final MasterProcedureEnv env) throws IOException { 870 env.getAssignmentManager().markRegionAsSplit(getParentRegion(), getParentRegionServerName(env), 871 daughterOneRI, daughterTwoRI); 872 } 873 874 /** 875 * Pre split region actions after the Point-of-No-Return step 876 * @param env MasterProcedureEnv 877 **/ 878 private void preSplitRegionAfterMETA(final MasterProcedureEnv env) 879 throws IOException, InterruptedException { 880 final MasterCoprocessorHost cpHost = env.getMasterCoprocessorHost(); 881 if (cpHost != null) { 882 cpHost.preSplitAfterMETAAction(getUser()); 883 } 884 } 885 886 /** 887 * Post split region actions 888 * @param env MasterProcedureEnv 889 **/ 890 private void postSplitRegion(final MasterProcedureEnv env) throws IOException { 891 final MasterCoprocessorHost cpHost = env.getMasterCoprocessorHost(); 892 if (cpHost != null) { 893 cpHost.postCompletedSplitRegionAction(daughterOneRI, daughterTwoRI, getUser()); 894 } 895 } 896 897 private ServerName getParentRegionServerName(final MasterProcedureEnv env) { 898 return env.getMasterServices().getAssignmentManager().getRegionStates() 899 .getRegionServerOfRegion(getParentRegion()); 900 } 901 902 private TransitRegionStateProcedure[] createUnassignProcedures(MasterProcedureEnv env) 903 throws IOException { 904 return AssignmentManagerUtil.createUnassignProceduresForSplitOrMerge(env, 905 Stream.of(getParentRegion()), getRegionReplication(env)); 906 } 907 908 private TransitRegionStateProcedure[] createAssignProcedures(MasterProcedureEnv env) 909 throws IOException { 910 List<RegionInfo> hris = new ArrayList<RegionInfo>(2); 911 hris.add(daughterOneRI); 912 hris.add(daughterTwoRI); 913 return AssignmentManagerUtil.createAssignProceduresForSplitDaughters(env, hris, 914 getRegionReplication(env), getParentRegionServerName(env)); 915 } 916 917 private int getRegionReplication(final MasterProcedureEnv env) throws IOException { 918 final TableDescriptor htd = env.getMasterServices().getTableDescriptors().get(getTableName()); 919 return htd.getRegionReplication(); 920 } 921 922 private void writeMaxSequenceIdFile(MasterProcedureEnv env) throws IOException { 923 MasterFileSystem fs = env.getMasterFileSystem(); 924 long maxSequenceId = WALSplitUtil.getMaxRegionSequenceId(env.getMasterConfiguration(), 925 getParentRegion(), fs::getFileSystem, fs::getWALFileSystem); 926 if (maxSequenceId > 0) { 927 WALSplitUtil.writeRegionSequenceIdFile(fs.getWALFileSystem(), 928 getWALRegionDir(env, daughterOneRI), maxSequenceId); 929 WALSplitUtil.writeRegionSequenceIdFile(fs.getWALFileSystem(), 930 getWALRegionDir(env, daughterTwoRI), maxSequenceId); 931 } 932 } 933 934 @Override 935 protected boolean abort(MasterProcedureEnv env) { 936 // Abort means rollback. We can't rollback all steps. HBASE-18018 added abort to all 937 // Procedures. Here is a Procedure that has a PONR and cannot be aborted wants it enters this 938 // range of steps; what do we do for these should an operator want to cancel them? HBASE-20022. 939 return isRollbackSupported(getCurrentState()) ? super.abort(env) : false; 940 } 941}