001/** 002 * Licensed to the Apache Software Foundation (ASF) under one 003 * or more contributor license agreements. See the NOTICE file 004 * distributed with this work for additional information 005 * regarding copyright ownership. The ASF licenses this file 006 * to you under the Apache License, Version 2.0 (the 007 * "License"); you may not use this file except in compliance 008 * with the License. You may obtain a copy of the License at 009 * 010 * http://www.apache.org/licenses/LICENSE-2.0 011 * 012 * Unless required by applicable law or agreed to in writing, software 013 * distributed under the License is distributed on an "AS IS" BASIS, 014 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 015 * See the License for the specific language governing permissions and 016 * limitations under the License. 017 */ 018package org.apache.hadoop.hbase.procedure2.store.region; 019 020import static org.apache.hadoop.hbase.HConstants.EMPTY_BYTE_ARRAY; 021import static org.apache.hadoop.hbase.HConstants.NO_NONCE; 022import static org.apache.hadoop.hbase.master.region.MasterRegionFactory.PROC_FAMILY; 023 024import java.io.IOException; 025import java.io.UncheckedIOException; 026import java.util.ArrayList; 027import java.util.Arrays; 028import java.util.Collections; 029import java.util.HashMap; 030import java.util.List; 031import java.util.Map; 032import java.util.Optional; 033import org.apache.commons.lang3.mutable.MutableLong; 034import org.apache.hadoop.conf.Configuration; 035import org.apache.hadoop.fs.FileSystem; 036import org.apache.hadoop.fs.Path; 037import org.apache.hadoop.hbase.Cell; 038import org.apache.hadoop.hbase.HBaseIOException; 039import org.apache.hadoop.hbase.Server; 040import org.apache.hadoop.hbase.client.Delete; 041import org.apache.hadoop.hbase.client.Mutation; 042import org.apache.hadoop.hbase.client.Put; 043import org.apache.hadoop.hbase.client.Scan; 044import org.apache.hadoop.hbase.ipc.RpcCall; 045import 
org.apache.hadoop.hbase.ipc.RpcServer; 046import org.apache.hadoop.hbase.log.HBaseMarkers; 047import org.apache.hadoop.hbase.master.assignment.AssignProcedure; 048import org.apache.hadoop.hbase.master.assignment.MoveRegionProcedure; 049import org.apache.hadoop.hbase.master.assignment.UnassignProcedure; 050import org.apache.hadoop.hbase.master.procedure.RecoverMetaProcedure; 051import org.apache.hadoop.hbase.master.procedure.ServerCrashProcedure; 052import org.apache.hadoop.hbase.master.region.MasterRegion; 053import org.apache.hadoop.hbase.procedure2.Procedure; 054import org.apache.hadoop.hbase.procedure2.ProcedureUtil; 055import org.apache.hadoop.hbase.procedure2.store.LeaseRecovery; 056import org.apache.hadoop.hbase.procedure2.store.ProcedureStoreBase; 057import org.apache.hadoop.hbase.procedure2.store.ProcedureTree; 058import org.apache.hadoop.hbase.procedure2.store.wal.WALProcedureStore; 059import org.apache.hadoop.hbase.regionserver.RegionScanner; 060import org.apache.hadoop.hbase.util.Bytes; 061import org.apache.hadoop.hbase.util.CommonFSUtils; 062import org.apache.yetus.audience.InterfaceAudience; 063import org.slf4j.Logger; 064import org.slf4j.LoggerFactory; 065 066import org.apache.hbase.thirdparty.com.google.common.collect.ImmutableSet; 067 068import org.apache.hadoop.hbase.shaded.protobuf.generated.ProcedureProtos; 069 070/** 071 * A procedure store which uses the master local store to store all the procedures. 072 * <p/> 073 * We use proc:d column to store the serialized protobuf format procedure, and when deleting we will 074 * first fill the info:proc column with an empty byte array, and then actually delete them in the 075 * {@link #cleanup()} method. This is because that we need to retain the max procedure id, so we can 076 * not directly delete a procedure row as we do not know if it is the one with the max procedure id. 
 */
@InterfaceAudience.Private
public class RegionProcedureStore extends ProcedureStoreBase {

  private static final Logger LOG = LoggerFactory.getLogger(RegionProcedureStore.class);

  // Qualifier of the single column used to store the serialized procedure. An EMPTY value in this
  // column marks the procedure as deleted (see serializeDelete/cleanup); the row itself is kept so
  // the max procedure id can still be recovered on load.
  static final byte[] PROC_QUALIFIER = Bytes.toBytes("d");

  private final Server server;

  // Used only when migrating from the old WALProcedureStore (see tryMigrate).
  private final LeaseRecovery leaseRecovery;

  // The master local region that backs this store; package-private for tests.
  final MasterRegion region;

  // Remembered from start() so it can be reported via getNumThreads() and handed to the old
  // WALProcedureStore during migration.
  private int numThreads;

  public RegionProcedureStore(Server server, MasterRegion region, LeaseRecovery leaseRecovery) {
    this.server = server;
    this.region = region;
    this.leaseRecovery = leaseRecovery;
  }

  /**
   * Marks the store as running and records the thread count. Idempotent: a second call while
   * already running is a no-op.
   */
  @Override
  public void start(int numThreads) throws IOException {
    if (!setRunning(true)) {
      return;
    }
    LOG.info("Starting the Region Procedure Store, number threads={}", numThreads);
    this.numThreads = numThreads;
  }

  /**
   * Marks the store as stopped. Idempotent: a second call while already stopped is a no-op. The
   * backing region's lifecycle is managed elsewhere, so nothing else to release here.
   */
  @Override
  public void stop(boolean abort) {
    if (!setRunning(false)) {
      return;
    }
    LOG.info("Stopping the Region Procedure Store, isAbort={}", abort);
  }

  @Override
  public int getNumThreads() {
    return numThreads;
  }

  @Override
  public int setRunningProcedureCount(int count) {
    // useless for region based storage.
    return count;
  }

  // Deprecated pre-2.2 procedure classes that must not be present when upgrading; see
  // checkUnsupportedProcedure below.
  @SuppressWarnings("deprecation")
  private static final ImmutableSet<Class<?>> UNSUPPORTED_PROCEDURES =
    ImmutableSet.of(RecoverMetaProcedure.class, AssignProcedure.class, UnassignProcedure.class,
      MoveRegionProcedure.class);

  /**
   * In HBASE-20811, we have introduced a new TRSP to assign/unassign/move regions, and it is
   * incompatible with the old AssignProcedure/UnassignProcedure/MoveRegionProcedure. So we need to
   * make sure that there are none these procedures when upgrading. If there are, the master will
   * quit, you need to go back to the old version to finish these procedures first before upgrading.
   * @param procsByType unfinished procedures loaded from the old store, grouped by concrete class
   * @throws HBaseIOException if any unsupported procedure (or an SCP in the recover-meta state) is
   *           found; the caller is expected to abort the upgrade
   */
  private void checkUnsupportedProcedure(Map<Class<?>, List<Procedure<?>>> procsByType)
    throws HBaseIOException {
    // Confirm that we do not have unfinished assign/unassign related procedures. It is not easy to
    // support both the old assign/unassign procedures and the new TransitRegionStateProcedure as
    // there will be conflict in the code for AM. We should finish all these procedures before
    // upgrading.
    for (Class<?> clazz : UNSUPPORTED_PROCEDURES) {
      List<Procedure<?>> procs = procsByType.get(clazz);
      if (procs != null) {
        LOG.error("Unsupported procedure type {} found, please rollback your master to the old" +
          " version to finish them, and then try to upgrade again." +
          " See https://hbase.apache.org/book.html#upgrade2.2 for more details." +
          " The full procedure list: {}", clazz, procs);
        throw new HBaseIOException("Unsupported procedure type " + clazz + " found");
      }
    }
    // A special check for SCP, as we do not support RecoverMetaProcedure any more so we need to
    // make sure that no one will try to schedule it but SCP does have a state which will schedule
    // it.
    if (procsByType.getOrDefault(ServerCrashProcedure.class, Collections.emptyList()).stream()
      .map(p -> (ServerCrashProcedure) p).anyMatch(ServerCrashProcedure::isInRecoverMetaState)) {
      LOG.error("At least one ServerCrashProcedure is going to schedule a RecoverMetaProcedure," +
        " which is not supported any more. Please rollback your master to the old version to" +
        " finish them, and then try to upgrade again." +
        " See https://hbase.apache.org/book.html#upgrade2.2 for more details.");
      throw new HBaseIOException("Unsupported procedure state found for ServerCrashProcedure");
    }
  }

  /**
   * One-shot migration from the legacy WAL-based store: if the old proc-wal directory exists, load
   * everything from it, reject unsupported pre-2.2 procedures BEFORE writing anything (so the user
   * can still roll back), re-write all procedures into this region-backed store, retain the max
   * procedure id, and finally delete the old directory so migration never runs twice.
   * @param fs the WAL file system where the old store's directory lives
   * @throws IOException if loading fails, corrupted/unsupported procedures are found, or the old
   *           directory cannot be removed
   */
  @SuppressWarnings("deprecation")
  private void tryMigrate(FileSystem fs) throws IOException {
    Configuration conf = server.getConfiguration();
    Path procWALDir =
      new Path(CommonFSUtils.getWALRootDir(conf), WALProcedureStore.MASTER_PROCEDURE_LOGDIR);
    if (!fs.exists(procWALDir)) {
      // Nothing to migrate: either a fresh deploy or migration already completed (the directory is
      // deleted at the end of a successful migration).
      return;
    }
    LOG.info("The old WALProcedureStore wal directory {} exists, migrating...", procWALDir);
    WALProcedureStore store = new WALProcedureStore(conf, leaseRecovery);
    store.start(numThreads);
    store.recoverLease();
    MutableLong maxProcIdSet = new MutableLong(-1);
    List<Procedure<?>> procs = new ArrayList<>();
    Map<Class<?>, List<Procedure<?>>> activeProcsByType = new HashMap<>();
    store.load(new ProcedureLoader() {

      @Override
      public void setMaxProcId(long maxProcId) {
        maxProcIdSet.setValue(maxProcId);
      }

      @Override
      public void load(ProcedureIterator procIter) throws IOException {
        while (procIter.hasNext()) {
          Procedure<?> proc = procIter.next();
          procs.add(proc);
          // Only unfinished procedures matter for the upgrade-compatibility check below.
          if (!proc.isFinished()) {
            activeProcsByType.computeIfAbsent(proc.getClass(), k -> new ArrayList<>()).add(proc);
          }
        }
      }

      @Override
      public void handleCorrupted(ProcedureIterator procIter) throws IOException {
        // Any corruption aborts the migration; the user must fix the old store first.
        long corruptedCount = 0;
        while (procIter.hasNext()) {
          LOG.error("Corrupted procedure {}", procIter.next());
          corruptedCount++;
        }
        if (corruptedCount > 0) {
          throw new IOException("There are " + corruptedCount + " corrupted procedures when" +
            " migrating from the old WAL based store to the new region based store, please" +
            " fix them before upgrading again.");
        }
      }
    });

    // check whether there are unsupported procedures, this could happen when we are migrating from
    // 2.1-. We used to do this in HMaster, after loading all the procedures from procedure store,
    // but here we have to do it before migrating, otherwise, if we find some unsupported
    // procedures, the users can not go back to 2.1 to finish them any more, as all the data are now
    // in the new region based procedure store, which is not supported in 2.1-.
    checkUnsupportedProcedure(activeProcsByType);

    MutableLong maxProcIdFromProcs = new MutableLong(-1);
    for (Procedure<?> proc : procs) {
      // update() serializes the procedure into the backing region.
      update(proc);
      if (proc.getProcId() > maxProcIdFromProcs.longValue()) {
        maxProcIdFromProcs.setValue(proc.getProcId());
      }
    }
    LOG.info("Migrated {} existing procedures from the old storage format.", procs.size());
    LOG.info("The WALProcedureStore max pid is {}, and the max pid of all loaded procedures is {}",
      maxProcIdSet.longValue(), maxProcIdFromProcs.longValue());
    // Theoretically, the maxProcIdSet should be greater than or equal to maxProcIdFromProcs, but
    // anyway, let's do a check here.
    if (maxProcIdSet.longValue() > maxProcIdFromProcs.longValue()) {
      if (maxProcIdSet.longValue() > 0) {
        // let's add a fake row to retain the max proc id
        region.update(r -> r.put(new Put(Bytes.toBytes(maxProcIdSet.longValue()))
          .addColumn(PROC_FAMILY, PROC_QUALIFIER, EMPTY_BYTE_ARRAY)));
      }
    } else if (maxProcIdSet.longValue() < maxProcIdFromProcs.longValue()) {
      LOG.warn("The WALProcedureStore max pid is less than the max pid of all loaded procedures");
    }
    store.stop(false);
    // Deleting the old directory is what makes this migration one-shot; failing to delete must be
    // fatal, otherwise we would migrate (and duplicate state) again on the next start.
    if (!fs.delete(procWALDir, true)) {
      throw new IOException(
        "Failed to delete the WALProcedureStore migrated proc wal directory " + procWALDir);
    }
    LOG.info("Migration of WALProcedureStore finished");
  }

  /**
   * The backing region handles its own WAL recovery, so the only work here is the one-time
   * migration from the legacy WALProcedureStore layout, if present.
   */
  @Override
  public void recoverLease() throws IOException {
    LOG.info("Starting Region Procedure Store lease recovery...");
    FileSystem fs = CommonFSUtils.getWALFileSystem(server.getConfiguration());
    tryMigrate(fs);
  }

  /**
   * Scans all proc:d cells, tracking the max procedure id across every row (including deleted
   * markers, whose value is empty) and deserializing the non-empty ones, then hands the procedures
   * to the loader after ProcedureTree has separated valid from corrupted entries.
   */
  @Override
  public void load(ProcedureLoader loader) throws IOException {
    List<ProcedureProtos.Procedure> procs = new ArrayList<>();
    long maxProcId = 0;

    try (RegionScanner scanner =
      region.getScanner(new Scan().addColumn(PROC_FAMILY, PROC_QUALIFIER))) {
      List<Cell> cells = new ArrayList<>();
      boolean moreRows;
      do {
        moreRows = scanner.next(cells);
        if (cells.isEmpty()) {
          continue;
        }
        // At most one cell per row since we scan a single column.
        Cell cell = cells.get(0);
        cells.clear();
        // The row key is the procedure id as a big-endian long; deleted rows still count here.
        maxProcId = Math.max(maxProcId,
          Bytes.toLong(cell.getRowArray(), cell.getRowOffset(), cell.getRowLength()));
        // An empty value is a delete marker (see serializeDelete), so skip deserialization.
        if (cell.getValueLength() > 0) {
          ProcedureProtos.Procedure proto = ProcedureProtos.Procedure.parser()
            .parseFrom(cell.getValueArray(), cell.getValueOffset(), cell.getValueLength());
          procs.add(proto);
        }
      } while (moreRows);
    }
    loader.setMaxProcId(maxProcId);
    ProcedureTree tree = ProcedureTree.build(procs);
    loader.load(tree.getValidProcs());
    loader.handleCorrupted(tree.getCorruptedProcs());
  }

  // Appends a Put (row key = proc id, value = serialized protobuf) and its row lock to the given
  // batch lists for a later atomic mutateRowsWithLocks call.
  private void serializePut(Procedure<?> proc, List<Mutation> mutations, List<byte[]> rowsToLock)
    throws IOException {
    ProcedureProtos.Procedure proto = ProcedureUtil.convertToProtoProcedure(proc);
    byte[] row = Bytes.toBytes(proc.getProcId());
    mutations.add(new Put(row).addColumn(PROC_FAMILY, PROC_QUALIFIER, proto.toByteArray()));
    rowsToLock.add(row);
  }

  // As we need to keep the max procedure id, here we can not simply delete the procedure, just fill
  // the proc column with an empty array.
  private void serializeDelete(long procId, List<Mutation> mutations, List<byte[]> rowsToLock) {
    byte[] row = Bytes.toBytes(procId);
    mutations.add(new Put(row).addColumn(PROC_FAMILY, PROC_QUALIFIER, EMPTY_BYTE_ARRAY));
    rowsToLock.add(row);
  }

  /**
   * Insert procedure may be called by master's rpc call. There are some check about the rpc call
   * when mutate region. Here unset the current rpc call and set it back in finally block. See
   * HBASE-23895 for more details.
   */
  private void runWithoutRpcCall(Runnable runnable) {
    Optional<RpcCall> rpcCall = RpcServer.unsetCurrentCall();
    try {
      runnable.run();
    } finally {
      rpcCall.ifPresent(RpcServer::setCurrentCall);
    }
  }

  /**
   * Persists a parent procedure together with its sub-procedures in one atomic multi-row mutation.
   * An IOException from the region is fatal for a procedure store, hence the rethrow as
   * UncheckedIOException after logging at FATAL.
   */
  @Override
  public void insert(Procedure<?> proc, Procedure<?>[] subProcs) {
    if (subProcs == null || subProcs.length == 0) {
      // same with update, just insert a single procedure
      update(proc);
      return;
    }
    List<Mutation> mutations = new ArrayList<>(subProcs.length + 1);
    List<byte[]> rowsToLock = new ArrayList<>(subProcs.length + 1);
    runWithoutRpcCall(() -> {
      try {
        serializePut(proc, mutations, rowsToLock);
        for (Procedure<?> subProc : subProcs) {
          serializePut(subProc, mutations, rowsToLock);
        }
        region.update(r -> r.mutateRowsWithLocks(mutations, rowsToLock, NO_NONCE, NO_NONCE));
      } catch (IOException e) {
        LOG.error(HBaseMarkers.FATAL, "Failed to insert proc {}, sub procs {}", proc,
          Arrays.toString(subProcs), e);
        throw new UncheckedIOException(e);
      }
    });
  }

  /**
   * Persists a batch of procedures in one atomic multi-row mutation.
   */
  @Override
  public void insert(Procedure<?>[] procs) {
    List<Mutation> mutations = new ArrayList<>(procs.length);
    List<byte[]> rowsToLock = new ArrayList<>(procs.length);
    runWithoutRpcCall(() -> {
      try {
        for (Procedure<?> proc : procs) {
          serializePut(proc, mutations, rowsToLock);
        }
        region.update(r -> r.mutateRowsWithLocks(mutations, rowsToLock, NO_NONCE, NO_NONCE));
      } catch (IOException e) {
        LOG.error(HBaseMarkers.FATAL, "Failed to insert procs {}", Arrays.toString(procs), e);
        throw new UncheckedIOException(e);
      }
    });
  }

  /**
   * Overwrites a single procedure's serialized state (a plain Put on its row).
   */
  @Override
  public void update(Procedure<?> proc) {
    runWithoutRpcCall(() -> {
      try {
        ProcedureProtos.Procedure proto = ProcedureUtil.convertToProtoProcedure(proc);
        region.update(r -> r.put(new Put(Bytes.toBytes(proc.getProcId())).addColumn(PROC_FAMILY,
          PROC_QUALIFIER, proto.toByteArray())));
      } catch (IOException e) {
        LOG.error(HBaseMarkers.FATAL, "Failed to update proc {}", proc, e);
        throw new UncheckedIOException(e);
      }
    });
  }

  /**
   * Logically deletes a procedure by writing an empty value to its row (the row is kept so the max
   * procedure id survives; physical removal happens later in {@link #cleanup()}).
   */
  @Override
  public void delete(long procId) {
    try {
      region.update(r -> r.put(
        new Put(Bytes.toBytes(procId)).addColumn(PROC_FAMILY, PROC_QUALIFIER, EMPTY_BYTE_ARRAY)));
    } catch (IOException e) {
      LOG.error(HBaseMarkers.FATAL, "Failed to delete pid={}", procId, e);
      throw new UncheckedIOException(e);
    }
  }

  /**
   * Atomically updates the parent procedure and logically deletes its sub-procedures in one
   * multi-row mutation.
   */
  @Override
  public void delete(Procedure<?> parentProc, long[] subProcIds) {
    List<Mutation> mutations = new ArrayList<>(subProcIds.length + 1);
    List<byte[]> rowsToLock = new ArrayList<>(subProcIds.length + 1);
    try {
      serializePut(parentProc, mutations, rowsToLock);
      for (long subProcId : subProcIds) {
        serializeDelete(subProcId, mutations, rowsToLock);
      }
      region.update(r -> r.mutateRowsWithLocks(mutations, rowsToLock, NO_NONCE, NO_NONCE));
    } catch (IOException e) {
      LOG.error(HBaseMarkers.FATAL, "Failed to delete parent proc {}, sub pids={}", parentProc,
        Arrays.toString(subProcIds), e);
      throw new UncheckedIOException(e);
    }
  }

  /**
   * Logically deletes {@code count} procedure ids starting at {@code offset}, as a single atomic
   * multi-row mutation (or a single-row put when count == 1).
   */
  @Override
  public void delete(long[] procIds, int offset, int count) {
    if (count == 0) {
      return;
    }
    if (count == 1) {
      delete(procIds[offset]);
      return;
    }
    List<Mutation> mutations = new ArrayList<>(count);
    List<byte[]> rowsToLock = new ArrayList<>(count);
    for (int i = 0; i < count; i++) {
      long procId = procIds[offset + i];
      serializeDelete(procId, mutations, rowsToLock);
    }
    try {
      region.update(r -> r.mutateRowsWithLocks(mutations, rowsToLock, NO_NONCE, NO_NONCE));
    } catch (IOException e) {
      LOG.error(HBaseMarkers.FATAL, "Failed to delete pids={}", Arrays.toString(procIds), e);
      throw new UncheckedIOException(e);
    }
  }

  /**
   * Physically removes rows whose proc:d value is empty (logically deleted procedures). Scans in
   * reverse so the first row returned is the one with the max procedure id, which is always
   * retained — even if logically deleted — so {@link #load} can still recover the max proc id.
   * Failures are only logged: cleanup is best-effort and will be retried on a later pass.
   */
  @Override
  public void cleanup() {
    // actually delete the procedures if it is not the one with the max procedure id.
    List<Cell> cells = new ArrayList<Cell>();
    try (RegionScanner scanner =
      region.getScanner(new Scan().addColumn(PROC_FAMILY, PROC_QUALIFIER).setReversed(true))) {
      // skip the row with max procedure id
      boolean moreRows = scanner.next(cells);
      if (cells.isEmpty()) {
        return;
      }
      cells.clear();
      while (moreRows) {
        moreRows = scanner.next(cells);
        if (cells.isEmpty()) {
          continue;
        }
        Cell cell = cells.get(0);
        cells.clear();
        // Empty value == logically deleted, safe to remove the row for real now.
        if (cell.getValueLength() == 0) {
          region.update(r -> r
            .delete(new Delete(cell.getRowArray(), cell.getRowOffset(), cell.getRowLength())));
        }
      }
    } catch (IOException e) {
      LOG.warn("Failed to clean up delete procedures", e);
    }
  }
}