/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.master.procedure;

import static org.apache.hadoop.hbase.NamespaceDescriptor.DEFAULT_NAMESPACE;
import static org.apache.hadoop.hbase.NamespaceDescriptor.SYSTEM_NAMESPACE;
import static org.apache.hadoop.hbase.master.TableNamespaceManager.insertNamespaceToMeta;
import static org.apache.hadoop.hbase.master.procedure.AbstractStateMachineNamespaceProcedure.createDirectory;

import java.io.IOException;
import java.util.Arrays;
import java.util.concurrent.CountDownLatch;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.LocatedFileStatus;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.RemoteIterator;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.RegionInfoBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.io.hfile.HFile;
import org.apache.hadoop.hbase.master.assignment.TransitRegionStateProcedure;
import org.apache.hadoop.hbase.procedure2.ProcedureStateSerializer;
import org.apache.hadoop.hbase.procedure2.ProcedureSuspendedException;
import org.apache.hadoop.hbase.procedure2.ProcedureUtil;
import org.apache.hadoop.hbase.procedure2.ProcedureYieldException;
import org.apache.hadoop.hbase.regionserver.HRegion;
import org.apache.hadoop.hbase.regionserver.StoreFileInfo;
import org.apache.hadoop.hbase.util.CommonFSUtils;
import org.apache.hadoop.hbase.util.FSTableDescriptors;
import org.apache.hadoop.hbase.util.RetryCounter;
import org.apache.yetus.audience.InterfaceAudience;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.InitMetaState;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.InitMetaStateData;
import org.apache.hadoop.hbase.shaded.protobuf.generated.ProcedureProtos;

/**
 * This procedure is used to initialize the meta table for a new HBase deploy. It writes the
 * initial hbase:meta filesystem layout, schedules a {@link TransitRegionStateProcedure} to assign
 * the meta region, and then creates the default and system namespaces. Callers can block on
 * {@link #await()} until the procedure completes.
 */
@InterfaceAudience.Private
public class InitMetaProcedure extends AbstractStateMachineTableProcedure<InitMetaState> {

  private static final Logger LOG = LoggerFactory.getLogger(InitMetaProcedure.class);

  private CountDownLatch latch = new CountDownLatch(1);

  private RetryCounter retryCounter;

  @Override
  public TableName getTableName() {
    return TableName.META_TABLE_NAME;
  }

  @Override
  public TableOperationType getTableOperationType() {
    return TableOperationType.CREATE;
  }

  private static TableDescriptor writeFsLayout(Path rootDir, MasterProcedureEnv env)
    throws IOException {
    LOG.info("BOOTSTRAP: creating hbase:meta region");
    FileSystem fs = rootDir.getFileSystem(env.getMasterConfiguration());
    Path tableDir = CommonFSUtils.getTableDir(rootDir, TableName.META_TABLE_NAME);
    if (fs.exists(tableDir) && !deleteMetaTableDirectoryIfPartial(fs, tableDir)) {
      LOG.warn("Cannot delete the partially created meta table directory, continuing...");
    }
    // Bootstrapping: make sure block caching is off, otherwise a block cache would be created
    // here during bootstrap and would need to be cleaned up afterwards. Better to not create it
    // in the first place; it can be enabled after bootstrap.
    TableDescriptor metaDescriptor = FSTableDescriptors
      .tryUpdateAndGetMetaTableDescriptor(env.getMasterConfiguration(), fs, rootDir);
    HRegion
      .createHRegion(RegionInfoBuilder.FIRST_META_REGIONINFO, rootDir,
        env.getMasterConfiguration(), metaDescriptor, null,
        env.getMasterServices().getKeyManagementService())
      .close();
    return metaDescriptor;
  }

  @Override
  protected Flow executeFromState(MasterProcedureEnv env, InitMetaState state)
    throws ProcedureSuspendedException, ProcedureYieldException, InterruptedException {
    LOG.debug("Execute {}", this);
    try {
      switch (state) {
        case INIT_META_WRITE_FS_LAYOUT:
          Configuration conf = env.getMasterConfiguration();
          Path rootDir = CommonFSUtils.getRootDir(conf);
          TableDescriptor td = writeFsLayout(rootDir, env);
          env.getMasterServices().getTableDescriptors().update(td, true);
          setNextState(InitMetaState.INIT_META_ASSIGN_META);
          return Flow.HAS_MORE_STATE;
        case INIT_META_ASSIGN_META:
          LOG.info("Going to assign meta");
          addChildProcedure(env.getAssignmentManager()
            .createAssignProcedures(Arrays.asList(RegionInfoBuilder.FIRST_META_REGIONINFO)));
          setNextState(InitMetaState.INIT_META_CREATE_NAMESPACES);
          return Flow.HAS_MORE_STATE;
        case INIT_META_CREATE_NAMESPACES:
          LOG.info("Going to create {} and {} namespaces", DEFAULT_NAMESPACE, SYSTEM_NAMESPACE);
          createDirectory(env, DEFAULT_NAMESPACE);
          createDirectory(env, SYSTEM_NAMESPACE);
          // The TableNamespaceManager has not been initialized yet, so we have to insert the
          // records directly into the meta table; the TableNamespaceManager will load these two
          // namespaces when it starts up.
          insertNamespaceToMeta(env.getMasterServices().getConnection(), DEFAULT_NAMESPACE);
          insertNamespaceToMeta(env.getMasterServices().getConnection(), SYSTEM_NAMESPACE);

          return Flow.NO_MORE_STATE;
        default:
          throw new UnsupportedOperationException("unhandled state=" + state);
      }
    } catch (IOException e) {
      if (retryCounter == null) {
        retryCounter = ProcedureUtil.createRetryCounter(env.getMasterConfiguration());
      }
      long backoff = retryCounter.getBackoffTimeAndIncrementAttempts();
      LOG.warn("Failed to init meta, suspend {}ms", backoff, e);
      setTimeout(Math.toIntExact(backoff));
      setState(ProcedureProtos.ProcedureState.WAITING_TIMEOUT);
      skipPersistence();
      throw new ProcedureSuspendedException();
    }
  }

  @Override
  protected boolean waitInitialized(MasterProcedureEnv env) {
    // we do not need to wait for master initialization, as we are part of the initialization.
    return false;
  }

  @Override
  protected synchronized boolean setTimeoutFailure(MasterProcedureEnv env) {
    // Instead of failing on timeout, reschedule the procedure so the suspended state is retried.
    setState(ProcedureProtos.ProcedureState.RUNNABLE);
    env.getProcedureScheduler().addFront(this);
    return false;
  }

  @Override
  protected void rollbackState(MasterProcedureEnv env, InitMetaState state)
    throws IOException, InterruptedException {
    throw new UnsupportedOperationException();
  }

  @Override
  protected InitMetaState getState(int stateId) {
    return InitMetaState.forNumber(stateId);
  }

  @Override
  protected int getStateId(InitMetaState state) {
    return state.getNumber();
  }

  @Override
  protected InitMetaState getInitialState() {
    return InitMetaState.INIT_META_WRITE_FS_LAYOUT;
  }

  @Override
  protected void serializeStateData(ProcedureStateSerializer serializer) throws IOException {
    super.serializeStateData(serializer);
    serializer.serialize(InitMetaStateData.getDefaultInstance());
  }

  @Override
  protected void deserializeStateData(ProcedureStateSerializer serializer) throws IOException {
    super.deserializeStateData(serializer);
    serializer.deserialize(InitMetaStateData.class);
  }

  @Override
  protected void completionCleanup(MasterProcedureEnv env) {
    latch.countDown();
  }

  public void await() throws InterruptedException {
    latch.await();
  }

  /**
   * Delete the existing meta table directory if it only contains a partial, unusable layout.
   * @return true if the partial directory was deleted
   * @throws IOException if valid HFiles are found, meaning the directory is not partial and must
   *           be fixed by an operator
   */
  private static boolean deleteMetaTableDirectoryIfPartial(FileSystem rootDirectoryFs,
    Path metaTableDir) throws IOException {
    boolean isPartial = true;
    try {
      TableDescriptor metaDescriptor =
        FSTableDescriptors.getTableDescriptorFromFs(rootDirectoryFs, metaTableDir);
      // When entering the INIT_META_WRITE_FS_LAYOUT state, an existing meta table directory
      // should not contain any useful data yet and is considered partial. If we find any valid
      // HFiles, the operator should fix the meta table instead, e.g. via HBCK.
      if (metaDescriptor != null && metaDescriptor.getColumnFamilyCount() > 0) {
        RemoteIterator<LocatedFileStatus> iterator = rootDirectoryFs.listFiles(metaTableDir, true);
        while (iterator.hasNext()) {
          LocatedFileStatus status = iterator.next();
          if (
            StoreFileInfo.isHFile(status.getPath())
              && HFile.isHFileFormat(rootDirectoryFs, status.getPath())
          ) {
            isPartial = false;
            break;
          }
        }
      }
    } finally {
      if (!isPartial) {
        throw new IOException("Meta table is not partial, please sideline this meta directory "
          + "or run HBCK to fix this meta table, e.g. rebuild the server hostname in ZNode for the "
          + "meta region");
      }

      return rootDirectoryFs.delete(metaTableDir, true);
    }
  }
}