/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.procedure2.store.region;

import static org.apache.hadoop.hbase.HConstants.EMPTY_BYTE_ARRAY;
import static org.apache.hadoop.hbase.HConstants.NO_NONCE;
import static org.apache.hadoop.hbase.master.region.MasterRegionFactory.PROC_FAMILY;

import java.io.IOException;
import java.io.UncheckedIOException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import org.apache.commons.lang3.mutable.MutableLong;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.HBaseIOException;
import org.apache.hadoop.hbase.Server;
import org.apache.hadoop.hbase.client.Delete;
import org.apache.hadoop.hbase.client.Mutation;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.log.HBaseMarkers;
import org.apache.hadoop.hbase.master.assignment.AssignProcedure;
import org.apache.hadoop.hbase.master.assignment.MoveRegionProcedure;
import org.apache.hadoop.hbase.master.assignment.UnassignProcedure;
import org.apache.hadoop.hbase.master.procedure.RecoverMetaProcedure;
import org.apache.hadoop.hbase.master.procedure.ServerCrashProcedure;
import org.apache.hadoop.hbase.master.region.MasterRegion;
import org.apache.hadoop.hbase.procedure2.Procedure;
import org.apache.hadoop.hbase.procedure2.ProcedureUtil;
import org.apache.hadoop.hbase.procedure2.store.LeaseRecovery;
import org.apache.hadoop.hbase.procedure2.store.ProcedureStoreBase;
import org.apache.hadoop.hbase.procedure2.store.ProcedureTree;
import org.apache.hadoop.hbase.procedure2.store.wal.WALProcedureStore;
import org.apache.hadoop.hbase.regionserver.RegionScanner;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.CommonFSUtils;
import org.apache.yetus.audience.InterfaceAudience;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import org.apache.hbase.thirdparty.com.google.common.collect.ImmutableSet;

import org.apache.hadoop.hbase.shaded.protobuf.generated.ProcedureProtos;

/**
 * A procedure store which uses the master local store to store all the procedures.
 * <p/>
 * We use proc:d column to store the serialized protobuf format procedure, and when deleting we will
 * first fill the proc:d column with an empty byte array, and then actually delete them in the
 * {@link #cleanup()} method. This is because that we need to retain the max procedure id, so we can
 * not directly delete a procedure row as we do not know if it is the one with the max procedure id.
 */
@InterfaceAudience.Private
public class RegionProcedureStore extends ProcedureStoreBase {

  private static final Logger LOG = LoggerFactory.getLogger(RegionProcedureStore.class);

  // Qualifier for the serialized procedure data; an empty value in this column marks a
  // logically-deleted procedure whose row is retained only to preserve the max procedure id.
  static final byte[] PROC_QUALIFIER = Bytes.toBytes("d");

  private final Server server;

  private final LeaseRecovery leaseRecovery;

  // The master local region that physically backs this store.
  final MasterRegion region;

  // Remembered only so getNumThreads() can report it and tryMigrate() can start the old
  // WAL store with the same thread count; this store itself does not use worker threads.
  private int numThreads;

  public RegionProcedureStore(Server server, MasterRegion region, LeaseRecovery leaseRecovery) {
    this.server = server;
    this.region = region;
    this.leaseRecovery = leaseRecovery;
  }

  /**
   * Marks the store as running and records the requested thread count.
   * <p/>
   * No threads are actually spawned here; the backing region handles I/O.
   */
  @Override
  public void start(int numThreads) throws IOException {
    if (!setRunning(true)) {
      // Already running, nothing to do.
      return;
    }
    LOG.info("Starting the Region Procedure Store, number threads={}", numThreads);
    this.numThreads = numThreads;
  }

  /**
   * Marks the store as stopped. The backing region is owned and closed elsewhere.
   */
  @Override
  public void stop(boolean abort) {
    if (!setRunning(false)) {
      // Already stopped, nothing to do.
      return;
    }
    LOG.info("Stopping the Region Procedure Store, isAbort={}", abort);
  }

  @Override
  public int getNumThreads() {
    return numThreads;
  }

  @Override
  public int setRunningProcedureCount(int count) {
    // useless for region based storage.
    return count;
  }

  // Procedure classes from pre-2.2 assignment that can not be carried across the upgrade;
  // see checkUnsupportedProcedure below.
  @SuppressWarnings("deprecation")
  private static final ImmutableSet<Class<?>> UNSUPPORTED_PROCEDURES =
    ImmutableSet.of(RecoverMetaProcedure.class, AssignProcedure.class, UnassignProcedure.class,
      MoveRegionProcedure.class);

  /**
   * In HBASE-20811, we have introduced a new TRSP to assign/unassign/move regions, and it is
   * incompatible with the old AssignProcedure/UnassignProcedure/MoveRegionProcedure. So we need to
   * make sure that there are none these procedures when upgrading. If there are, the master will
   * quit, you need to go back to the old version to finish these procedures first before upgrading.
   * @param procsByType unfinished procedures loaded from the old store, grouped by concrete class
   * @throws HBaseIOException if any unsupported procedure (or SCP state) is found
   */
  private void checkUnsupportedProcedure(Map<Class<?>, List<Procedure<?>>> procsByType)
    throws HBaseIOException {
    // Confirm that we do not have unfinished assign/unassign related procedures. It is not easy to
    // support both the old assign/unassign procedures and the new TransitRegionStateProcedure as
    // there will be conflict in the code for AM. We should finish all these procedures before
    // upgrading.
    for (Class<?> clazz : UNSUPPORTED_PROCEDURES) {
      List<Procedure<?>> procs = procsByType.get(clazz);
      if (procs != null) {
        LOG.error("Unsupported procedure type {} found, please rollback your master to the old"
          + " version to finish them, and then try to upgrade again."
          + " See https://hbase.apache.org/docs/upgrading/paths#upgrade-from-20-or-21-to-22"
          + " for more details. The full procedure list: {}", clazz, procs);
        throw new HBaseIOException("Unsupported procedure type " + clazz + " found");
      }
    }
    // A special check for SCP, as we do not support RecoverMetaProcedure any more so we need to
    // make sure that no one will try to schedule it but SCP does have a state which will schedule
    // it.
    if (
      procsByType.getOrDefault(ServerCrashProcedure.class, Collections.emptyList()).stream()
        .map(p -> (ServerCrashProcedure) p).anyMatch(ServerCrashProcedure::isInRecoverMetaState)
    ) {
      LOG.error("At least one ServerCrashProcedure is going to schedule a RecoverMetaProcedure,"
        + " which is not supported any more. Please rollback your master to the old version to"
        + " finish them, and then try to upgrade again."
        + " See https://hbase.apache.org/docs/upgrading/paths#upgrade-from-20-or-21-to-22"
        + " for more details.");
      throw new HBaseIOException("Unsupported procedure state found for ServerCrashProcedure");
    }
  }

  /**
   * Migrates procedures from the legacy {@link WALProcedureStore} into this region based store, if
   * the old proc-wal directory still exists. No-op otherwise.
   * <p/>
   * The unsupported-procedure check is done BEFORE writing anything to the new store, so that on
   * failure the user can still roll back to the old version and finish the pending procedures. On
   * success the old wal directory is removed so migration runs at most once.
   * @param fs the wal file system where the old proc-wal directory lives
   * @throws IOException if loading, migrating, or deleting the old store fails
   */
  @SuppressWarnings("deprecation")
  private void tryMigrate(FileSystem fs) throws IOException {
    Configuration conf = server.getConfiguration();
    Path procWALDir =
      new Path(CommonFSUtils.getWALRootDir(conf), WALProcedureStore.MASTER_PROCEDURE_LOGDIR);
    if (!fs.exists(procWALDir)) {
      // Nothing to migrate.
      return;
    }
    LOG.info("The old WALProcedureStore wal directory {} exists, migrating...", procWALDir);
    WALProcedureStore store = new WALProcedureStore(conf, leaseRecovery);
    store.start(numThreads);
    store.recoverLease();
    MutableLong maxProcIdSet = new MutableLong(-1);
    List<Procedure<?>> procs = new ArrayList<>();
    Map<Class<?>, List<Procedure<?>>> activeProcsByType = new HashMap<>();
    store.load(new ProcedureLoader() {

      @Override
      public void setMaxProcId(long maxProcId) {
        maxProcIdSet.setValue(maxProcId);
      }

      @Override
      public void load(ProcedureIterator procIter) throws IOException {
        while (procIter.hasNext()) {
          Procedure<?> proc = procIter.next();
          procs.add(proc);
          if (!proc.isFinished()) {
            // Only unfinished procedures matter for the unsupported-type check below.
            activeProcsByType.computeIfAbsent(proc.getClass(), k -> new ArrayList<>()).add(proc);
          }
        }
      }

      @Override
      public void handleCorrupted(ProcedureIterator procIter) throws IOException {
        long corruptedCount = 0;
        while (procIter.hasNext()) {
          LOG.error("Corrupted procedure {}", procIter.next());
          corruptedCount++;
        }
        if (corruptedCount > 0) {
          // Fail the migration: we must not silently drop procedures.
          throw new IOException("There are " + corruptedCount + " corrupted procedures when"
            + " migrating from the old WAL based store to the new region based store, please"
            + " fix them before upgrading again.");
        }
      }
    });

    // check whether there are unsupported procedures, this could happen when we are migrating from
    // 2.1-. We used to do this in HMaster, after loading all the procedures from procedure store,
    // but here we have to do it before migrating, otherwise, if we find some unsupported
    // procedures, the users can not go back to 2.1 to finish them any more, as all the data are now
    // in the new region based procedure store, which is not supported in 2.1-.
    checkUnsupportedProcedure(activeProcsByType);

    MutableLong maxProcIdFromProcs = new MutableLong(-1);
    for (Procedure<?> proc : procs) {
      // update() writes the procedure into the backing region.
      update(proc);
      if (proc.getProcId() > maxProcIdFromProcs.longValue()) {
        maxProcIdFromProcs.setValue(proc.getProcId());
      }
    }
    LOG.info("Migrated {} existing procedures from the old storage format.", procs.size());
    LOG.info("The WALProcedureStore max pid is {}, and the max pid of all loaded procedures is {}",
      maxProcIdSet.longValue(), maxProcIdFromProcs.longValue());
    // Theoretically, the maxProcIdSet should be greater than or equal to maxProcIdFromProcs, but
    // anyway, let's do a check here.
    if (maxProcIdSet.longValue() > maxProcIdFromProcs.longValue()) {
      if (maxProcIdSet.longValue() > 0) {
        // let's add a fake row to retain the max proc id
        region.update(r -> r.put(new Put(Bytes.toBytes(maxProcIdSet.longValue()))
          .addColumn(PROC_FAMILY, PROC_QUALIFIER, EMPTY_BYTE_ARRAY)));
      }
    } else if (maxProcIdSet.longValue() < maxProcIdFromProcs.longValue()) {
      LOG.warn("The WALProcedureStore max pid is less than the max pid of all loaded procedures");
    }
    store.stop(false);
    // Remove the old directory so the migration will not run again on the next startup.
    if (!fs.delete(procWALDir, true)) {
      throw new IOException(
        "Failed to delete the WALProcedureStore migrated proc wal directory " + procWALDir);
    }
    LOG.info("Migration of WALProcedureStore finished");
  }

  /**
   * For this store "lease recovery" amounts to migrating any data left in the legacy WAL based
   * store; the backing master region handles its own wal recovery.
   */
  @Override
  public void recoverLease() throws IOException {
    LOG.info("Starting Region Procedure Store lease recovery...");
    FileSystem fs = CommonFSUtils.getWALFileSystem(server.getConfiguration());
    tryMigrate(fs);
  }

  /**
   * Loads all procedures by scanning the proc:d column of the backing region.
   * <p/>
   * Every row (including empty-value "deleted" placeholders) contributes to the max procedure id;
   * only rows with a non-empty value are deserialized and handed to the loader, after being
   * grouped into valid/corrupted sets by {@link ProcedureTree}.
   */
  @Override
  public void load(ProcedureLoader loader) throws IOException {
    List<ProcedureProtos.Procedure> procs = new ArrayList<>();
    long maxProcId = 0;

    try (RegionScanner scanner =
      region.getRegionScanner(new Scan().addColumn(PROC_FAMILY, PROC_QUALIFIER))) {
      List<Cell> cells = new ArrayList<>();
      boolean moreRows;
      do {
        moreRows = scanner.next(cells);
        if (cells.isEmpty()) {
          continue;
        }
        // Only one column is scanned, so the row yields exactly one cell.
        Cell cell = cells.get(0);
        cells.clear();
        // The row key is the procedure id encoded as a long.
        maxProcId = Math.max(maxProcId,
          Bytes.toLong(cell.getRowArray(), cell.getRowOffset(), cell.getRowLength()));
        if (cell.getValueLength() > 0) {
          ProcedureProtos.Procedure proto = ProcedureProtos.Procedure.parser()
            .parseFrom(cell.getValueArray(), cell.getValueOffset(), cell.getValueLength());
          procs.add(proto);
        }
      } while (moreRows);
    }
    loader.setMaxProcId(maxProcId);
    ProcedureTree tree = ProcedureTree.build(procs);
    loader.load(tree.getValidProcs());
    loader.handleCorrupted(tree.getCorruptedProcs());
  }

  // Serializes a procedure into a Put on its proc-id row, collecting the mutation and the row to
  // lock for a later atomic mutateRowsWithLocks call.
  private void serializePut(Procedure<?> proc, List<Mutation> mutations, List<byte[]> rowsToLock)
    throws IOException {
    ProcedureProtos.Procedure proto = ProcedureUtil.convertToProtoProcedure(proc);
    byte[] row = Bytes.toBytes(proc.getProcId());
    mutations.add(new Put(row).addColumn(PROC_FAMILY, PROC_QUALIFIER, proto.toByteArray()));
    rowsToLock.add(row);
  }

  // As we need to keep the max procedure id, here we can not simply delete the procedure, just fill
  // the proc column with an empty array.
  private void serializeDelete(long procId, List<Mutation> mutations, List<byte[]> rowsToLock) {
    byte[] row = Bytes.toBytes(procId);
    mutations.add(new Put(row).addColumn(PROC_FAMILY, PROC_QUALIFIER, EMPTY_BYTE_ARRAY));
    rowsToLock.add(row);
  }

  /**
   * Atomically persists a parent procedure together with its sub procedures, so that either all of
   * them or none of them survive a crash.
   */
  @Override
  public void insert(Procedure<?> proc, Procedure<?>[] subProcs) {
    if (subProcs == null || subProcs.length == 0) {
      // same with update, just insert a single procedure
      update(proc);
      return;
    }
    List<Mutation> mutations = new ArrayList<>(subProcs.length + 1);
    List<byte[]> rowsToLock = new ArrayList<>(subProcs.length + 1);
    try {
      serializePut(proc, mutations, rowsToLock);
      for (Procedure<?> subProc : subProcs) {
        serializePut(subProc, mutations, rowsToLock);
      }
      region.update(r -> r.mutateRowsWithLocks(mutations, rowsToLock, NO_NONCE, NO_NONCE));
    } catch (IOException e) {
      // A failed write leaves the store in an unknown state; abort via unchecked exception.
      LOG.error(HBaseMarkers.FATAL, "Failed to insert proc {}, sub procs {}", proc,
        Arrays.toString(subProcs), e);
      throw new UncheckedIOException(e);
    }
  }

  /**
   * Atomically persists a batch of procedures.
   */
  @Override
  public void insert(Procedure<?>[] procs) {
    List<Mutation> mutations = new ArrayList<>(procs.length);
    List<byte[]> rowsToLock = new ArrayList<>(procs.length);
    try {
      for (Procedure<?> proc : procs) {
        serializePut(proc, mutations, rowsToLock);
      }
      region.update(r -> r.mutateRowsWithLocks(mutations, rowsToLock, NO_NONCE, NO_NONCE));
    } catch (IOException e) {
      LOG.error(HBaseMarkers.FATAL, "Failed to insert procs {}", Arrays.toString(procs), e);
      throw new UncheckedIOException(e);
    }
  }

  /**
   * Persists the current state of a single procedure (a single-row put needs no explicit locking).
   */
  @Override
  public void update(Procedure<?> proc) {
    try {
      ProcedureProtos.Procedure proto = ProcedureUtil.convertToProtoProcedure(proc);
      region.update(r -> r.put(new Put(Bytes.toBytes(proc.getProcId())).addColumn(PROC_FAMILY,
        PROC_QUALIFIER, proto.toByteArray())));
    } catch (IOException e) {
      LOG.error(HBaseMarkers.FATAL, "Failed to update proc {}", proc, e);
      throw new UncheckedIOException(e);
    }
  }

  /**
   * Logically deletes a procedure by overwriting its proc:d column with an empty value; the row is
   * physically removed later in {@link #cleanup()} (see the class comment for why).
   */
  @Override
  public void delete(long procId) {
    try {
      region.update(r -> r.put(
        new Put(Bytes.toBytes(procId)).addColumn(PROC_FAMILY, PROC_QUALIFIER, EMPTY_BYTE_ARRAY)));
    } catch (IOException e) {
      LOG.error(HBaseMarkers.FATAL, "Failed to delete pid={}", procId, e);
      throw new UncheckedIOException(e);
    }
  }

  /**
   * Atomically updates the parent procedure and logically deletes its finished sub procedures.
   */
  @Override
  public void delete(Procedure<?> parentProc, long[] subProcIds) {
    List<Mutation> mutations = new ArrayList<>(subProcIds.length + 1);
    List<byte[]> rowsToLock = new ArrayList<>(subProcIds.length + 1);
    try {
      serializePut(parentProc, mutations, rowsToLock);
      for (long subProcId : subProcIds) {
        serializeDelete(subProcId, mutations, rowsToLock);
      }
      region.update(r -> r.mutateRowsWithLocks(mutations, rowsToLock, NO_NONCE, NO_NONCE));
    } catch (IOException e) {
      LOG.error(HBaseMarkers.FATAL, "Failed to delete parent proc {}, sub pids={}", parentProc,
        Arrays.toString(subProcIds), e);
      throw new UncheckedIOException(e);
    }
  }

  /**
   * Logically deletes {@code count} procedure ids starting at {@code offset} in {@code procIds},
   * atomically when there is more than one.
   */
  @Override
  public void delete(long[] procIds, int offset, int count) {
    if (count == 0) {
      return;
    }
    if (count == 1) {
      // Single id: the one-row path needs no multi-row locking.
      delete(procIds[offset]);
      return;
    }
    List<Mutation> mutations = new ArrayList<>(count);
    List<byte[]> rowsToLock = new ArrayList<>(count);
    for (int i = 0; i < count; i++) {
      long procId = procIds[offset + i];
      serializeDelete(procId, mutations, rowsToLock);
    }
    try {
      region.update(r -> r.mutateRowsWithLocks(mutations, rowsToLock, NO_NONCE, NO_NONCE));
    } catch (IOException e) {
      LOG.error(HBaseMarkers.FATAL, "Failed to delete pids={}", Arrays.toString(procIds), e);
      throw new UncheckedIOException(e);
    }
  }

  /**
   * Physically removes the rows of logically-deleted procedures (those with an empty proc:d
   * value), while always retaining the row with the max procedure id. Failures are only logged:
   * cleanup is best-effort and will be retried on a later invocation.
   */
  @Override
  public void cleanup() {
    // actually delete the procedures if it is not the one with the max procedure id.
    List<Cell> cells = new ArrayList<Cell>();
    try (RegionScanner scanner = region
      .getRegionScanner(new Scan().addColumn(PROC_FAMILY, PROC_QUALIFIER).setReversed(true))) {
      // skip the row with max procedure id
      // (reversed scan: the first row returned has the largest row key, i.e. the max proc id)
      boolean moreRows = scanner.next(cells);
      if (cells.isEmpty()) {
        // Store is empty, nothing to clean.
        return;
      }
      cells.clear();
      while (moreRows) {
        moreRows = scanner.next(cells);
        if (cells.isEmpty()) {
          continue;
        }
        Cell cell = cells.get(0);
        cells.clear();
        if (cell.getValueLength() == 0) {
          // Empty value marks a logically-deleted procedure; remove its row for real.
          region.update(
            r -> r.delete(new Delete(cell.getRowArray(), cell.getRowOffset(), cell.getRowLength())
              .addFamily(PROC_FAMILY)));
        }
      }
    } catch (IOException e) {
      LOG.warn("Failed to clean up delete procedures", e);
    }
  }
}