/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.master.procedure;

import java.io.IOException;
import java.util.Arrays;
import java.util.concurrent.CountDownLatch;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.LocatedFileStatus;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.RemoteIterator;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.RegionInfoBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.io.hfile.HFile;
import org.apache.hadoop.hbase.master.assignment.TransitRegionStateProcedure;
import org.apache.hadoop.hbase.procedure2.ProcedureStateSerializer;
import org.apache.hadoop.hbase.procedure2.ProcedureSuspendedException;
import org.apache.hadoop.hbase.procedure2.ProcedureUtil;
import org.apache.hadoop.hbase.procedure2.ProcedureYieldException;
import org.apache.hadoop.hbase.regionserver.HRegion;
import org.apache.hadoop.hbase.regionserver.StoreFileInfo;
import org.apache.hadoop.hbase.util.CommonFSUtils;
import org.apache.hadoop.hbase.util.FSTableDescriptors;
import org.apache.hadoop.hbase.util.RetryCounter;
import org.apache.yetus.audience.InterfaceAudience;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.InitMetaState;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.InitMetaStateData;
import org.apache.hadoop.hbase.shaded.protobuf.generated.ProcedureProtos;

/**
 * This procedure is used to initialize the meta table for a new hbase deploy. It first writes the
 * hbase:meta region layout to the filesystem and then schedules a
 * {@link TransitRegionStateProcedure} to assign meta.
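 * <p>
 * Callers can block on {@link #await()} until this procedure completes; the latch is released in
 * {@link #completionCleanup(MasterProcedureEnv)}.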
 */
@InterfaceAudience.Private
public class InitMetaProcedure extends AbstractStateMachineTableProcedure<InitMetaState> {

  private static final Logger LOG = LoggerFactory.getLogger(InitMetaProcedure.class);

  private CountDownLatch latch = new CountDownLatch(1);

  private RetryCounter retryCounter;

  @Override
  public TableName getTableName() {
    return TableName.META_TABLE_NAME;
  }

  @Override
  public TableOperationType getTableOperationType() {
    return TableOperationType.CREATE;
  }

  private static TableDescriptor writeFsLayout(Path rootDir, Configuration conf)
    throws IOException {
    LOG.info("BOOTSTRAP: creating hbase:meta region");
    FileSystem fs = rootDir.getFileSystem(conf);
    Path tableDir = CommonFSUtils.getTableDir(rootDir, TableName.META_TABLE_NAME);
    if (fs.exists(tableDir) && !deleteMetaTableDirectoryIfPartial(fs, tableDir)) {
      LOG.warn("Can not delete partially created meta table, continue...");
    }
    // Bootstrapping, make sure blockcache is off. Else, one will be created here in bootstrap and
    // it'll need to be cleaned up. Better to not make it in the first place: turn off block
    // caching for bootstrap and enable it afterwards.
    TableDescriptor metaDescriptor =
      FSTableDescriptors.tryUpdateAndGetMetaTableDescriptor(conf, fs, rootDir);
    HRegion
      .createHRegion(RegionInfoBuilder.FIRST_META_REGIONINFO, rootDir, conf, metaDescriptor, null)
      .close();
    return metaDescriptor;
  }

  @Override
  protected Flow executeFromState(MasterProcedureEnv env, InitMetaState state)
    throws ProcedureSuspendedException, ProcedureYieldException, InterruptedException {
    LOG.debug("Execute {}", this);
    try {
      switch (state) {
        case INIT_META_WRITE_FS_LAYOUT:
          Configuration conf = env.getMasterConfiguration();
          Path rootDir = CommonFSUtils.getRootDir(conf);
          TableDescriptor td = writeFsLayout(rootDir, conf);
          env.getMasterServices().getTableDescriptors().update(td, true);
          setNextState(InitMetaState.INIT_META_ASSIGN_META);
          return Flow.HAS_MORE_STATE;
        case INIT_META_ASSIGN_META:
          LOG.info("Going to assign meta");
          addChildProcedure(env.getAssignmentManager()
            .createAssignProcedures(Arrays.asList(RegionInfoBuilder.FIRST_META_REGIONINFO)));
          return Flow.NO_MORE_STATE;
        default:
          throw new UnsupportedOperationException("unhandled state=" + state);
      }
    } catch (IOException e) {
      if (retryCounter == null) {
        retryCounter = ProcedureUtil.createRetryCounter(env.getMasterConfiguration());
      }
      long backoff = retryCounter.getBackoffTimeAndIncrementAttempts();
      LOG.warn("Failed to init meta, suspend {}ms", backoff, e);
      setTimeout(Math.toIntExact(backoff));
      setState(ProcedureProtos.ProcedureState.WAITING_TIMEOUT);
      skipPersistence();
      throw new ProcedureSuspendedException();
    }
  }

  @Override
  protected boolean waitInitialized(MasterProcedureEnv env) {
    // We do not need to wait for the master to be initialized, since we are part of the
    // initialization.
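    // Returning false here lets the procedure run while the master is still starting up.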
    return false;
  }

  @Override
  protected void rollbackState(MasterProcedureEnv env, InitMetaState state)
    throws IOException, InterruptedException {
    throw new UnsupportedOperationException();
  }

  @Override
  protected InitMetaState getState(int stateId) {
    return InitMetaState.forNumber(stateId);
  }

  @Override
  protected int getStateId(InitMetaState state) {
    return state.getNumber();
  }

  @Override
  protected InitMetaState getInitialState() {
    return InitMetaState.INIT_META_WRITE_FS_LAYOUT;
  }

  @Override
  protected void serializeStateData(ProcedureStateSerializer serializer) throws IOException {
    super.serializeStateData(serializer);
    // There is no extra state to persist, so only an empty InitMetaStateData message is written.
    serializer.serialize(InitMetaStateData.getDefaultInstance());
  }

  @Override
  protected void deserializeStateData(ProcedureStateSerializer serializer) throws IOException {
    super.deserializeStateData(serializer);
    serializer.deserialize(InitMetaStateData.class);
  }

  @Override
  protected void completionCleanup(MasterProcedureEnv env) {
    latch.countDown();
  }

  /**
   * Block until this procedure has completed; the latch is counted down in
   * {@link #completionCleanup(MasterProcedureEnv)}.
   */
  public void await() throws InterruptedException {
    latch.await();
  }

  /**
   * Delete the meta table directory if it is only a partially created bootstrap, i.e. it contains
   * no valid HFiles. If valid HFiles are found, an IOException is thrown instead so that an
   * operator can fix or sideline the directory.
   * @return whether the directory was deleted
   */
  private static boolean deleteMetaTableDirectoryIfPartial(FileSystem rootDirectoryFs,
    Path metaTableDir) throws IOException {
    boolean shouldDelete = true;
    try {
      TableDescriptor metaDescriptor =
        FSTableDescriptors.getTableDescriptorFromFs(rootDirectoryFs, metaTableDir);
      // When entering the INIT_META_WRITE_FS_LAYOUT state, if a meta table directory is found, the
      // meta table should not have any useful data yet and is considered partial.
      // If we find any valid HFiles, the operator should fix the meta, e.g. via HBCK.
      if (metaDescriptor != null && metaDescriptor.getColumnFamilyCount() > 0) {
        RemoteIterator<LocatedFileStatus> iterator = rootDirectoryFs.listFiles(metaTableDir, true);
        while (iterator.hasNext()) {
          LocatedFileStatus status = iterator.next();
          if (
            StoreFileInfo.isHFile(status.getPath())
              && HFile.isHFileFormat(rootDirectoryFs, status.getPath())
          ) {
            shouldDelete = false;
            break;
          }
        }
      }
    } finally {
      if (!shouldDelete) {
        throw new IOException("Meta table is not partial, please sideline this meta directory "
          + "or run HBCK to fix this meta table, e.g. rebuild the server hostname in ZNode for "
          + "the meta region");
      }
      // Returning from the finally block also drops any exception thrown above; in that case the
      // directory is still treated as partial and deleted.
      return rootDirectoryFs.delete(metaTableDir, true);
    }
  }
}