/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.master;

import java.io.IOException;
import java.util.List;
import java.util.Optional;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import java.util.stream.Collectors;
import org.apache.commons.lang3.StringUtils;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.DoNotRetryIOException;
import org.apache.hadoop.hbase.HBaseIOException;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.NamespaceDescriptor;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.Delete;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableState;
import org.apache.hadoop.hbase.constraint.ConstraintException;
import org.apache.hadoop.hbase.master.procedure.DisableTableProcedure;
import org.apache.hadoop.hbase.master.procedure.MigrateNamespaceTableProcedure;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.yetus.audience.InterfaceAudience;

import org.apache.hbase.thirdparty.com.google.protobuf.CodedInputStream;

import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos;

/**
 * This is a helper class used internally to manage the namespace metadata that is stored in the ns
 * family of the meta table.
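 * <p>
 * A minimal, hypothetical usage sketch (for illustration only; this class is internal to the
 * master, and {@code masterServices} below stands in for whatever {@link MasterServices} instance
 * the caller already has):
 *
 * <pre>
 * TableNamespaceManager nsManager = new TableNamespaceManager(masterServices);
 * nsManager.start(); // migrate the old namespace table if needed, then fill the cache
 * NamespaceDescriptor defaultNs = nsManager.get(NamespaceDescriptor.DEFAULT_NAMESPACE_NAME_STR);
 * </pre>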
 */
@InterfaceAudience.Private
public class TableNamespaceManager {

  public static final String KEY_MAX_REGIONS = "hbase.namespace.quota.maxregions";
  public static final String KEY_MAX_TABLES = "hbase.namespace.quota.maxtables";
  static final String NS_INIT_TIMEOUT = "hbase.master.namespace.init.timeout";
  static final int DEFAULT_NS_INIT_TIMEOUT = 300000;

  private final ConcurrentMap<String, NamespaceDescriptor> cache = new ConcurrentHashMap<>();

  private final MasterServices masterServices;

  private volatile boolean migrationDone;

  TableNamespaceManager(MasterServices masterServices) {
    this.masterServices = masterServices;
  }

  private void tryMigrateNamespaceTable() throws IOException, InterruptedException {
    Optional<MigrateNamespaceTableProcedure> opt = masterServices.getProcedures().stream()
      .filter(p -> p instanceof MigrateNamespaceTableProcedure)
      .map(p -> (MigrateNamespaceTableProcedure) p).findAny();
    if (!opt.isPresent()) {
      // the procedure is not present, so check whether the ns family already exists in meta
      TableDescriptor metaTableDesc =
        masterServices.getTableDescriptors().get(TableName.META_TABLE_NAME);
      if (metaTableDesc.hasColumnFamily(HConstants.NAMESPACE_FAMILY)) {
        // normal case: the upgrade is already done, or the cluster was created with 3.x code
        migrationDone = true;
      } else {
        // submit the migration procedure
        MigrateNamespaceTableProcedure proc = new MigrateNamespaceTableProcedure();
        masterServices.getMasterProcedureExecutor().submitProcedure(proc);
      }
    } else {
      if (opt.get().isFinished()) {
        // the procedure is already done
        migrationDone = true;
      }
      // otherwise, we have already submitted the procedure, so just continue
    }
  }

  private void addToCache(Result result, byte[] family, byte[] qualifier) throws IOException {
    Cell cell = result.getColumnLatestCell(family, qualifier);
    NamespaceDescriptor ns =
      ProtobufUtil.toNamespaceDescriptor(HBaseProtos.NamespaceDescriptor.parseFrom(CodedInputStream
        .newInstance(cell.getValueArray(), cell.getValueOffset(), cell.getValueLength())));
    cache.put(ns.getName(), ns);
  }

  private void loadFromMeta() throws IOException {
    try (Table table = masterServices.getConnection().getTable(TableName.META_TABLE_NAME);
      ResultScanner scanner = table.getScanner(HConstants.NAMESPACE_FAMILY)) {
      for (Result result;;) {
        result = scanner.next();
        if (result == null) {
          break;
        }
        addToCache(result, HConstants.NAMESPACE_FAMILY, HConstants.NAMESPACE_COL_DESC_QUALIFIER);
      }
    }
  }

  private void loadFromNamespace() throws IOException {
    try (Table table = masterServices.getConnection().getTable(TableName.NAMESPACE_TABLE_NAME);
      ResultScanner scanner =
        table.getScanner(TableDescriptorBuilder.NAMESPACE_FAMILY_INFO_BYTES)) {
      for (Result result;;) {
        result = scanner.next();
        if (result == null) {
          break;
        }
        addToCache(result, TableDescriptorBuilder.NAMESPACE_FAMILY_INFO_BYTES,
          TableDescriptorBuilder.NAMESPACE_COL_DESC_BYTES);
      }
    }
  }

  private boolean shouldLoadFromMeta() throws IOException {
    if (migrationDone) {
      return true;
    }
    // The implementation is a bit tricky. If there is already a DisableTableProcedure for the
    // namespace table, or the namespace table is already disabled, we are safe to read from the
    // meta table because the migration is already done.
    // If not, then since this method is part of master initialization, we know the master has not
    // been marked as initialized yet, and a DisableTableProcedure can only be executed after the
    // master is initialized. So it is safe to read from the namespace table here, without worrying
    // that the namespace table gets disabled while we are reading and crashes the master startup.
    if (
      masterServices.getTableStateManager().isTableState(TableName.NAMESPACE_TABLE_NAME,
        TableState.State.DISABLED)
    ) {
      return true;
    }
    if (
      masterServices.getProcedures().stream().filter(p -> p instanceof DisableTableProcedure)
        .anyMatch(
          p -> ((DisableTableProcedure) p).getTableName().equals(TableName.NAMESPACE_TABLE_NAME))
    ) {
      return true;
    }
    return false;
  }

  private void loadNamespaceIntoCache() throws IOException {
    if (shouldLoadFromMeta()) {
      loadFromMeta();
    } else {
      loadFromNamespace();
    }
  }

  public void start() throws IOException, InterruptedException {
    tryMigrateNamespaceTable();
    loadNamespaceIntoCache();
  }

  /**
   * Check whether a namespace already exists.
   */
  public boolean doesNamespaceExist(String namespaceName) throws IOException {
    return cache.containsKey(namespaceName);
  }

  public NamespaceDescriptor get(String name) throws IOException {
    return cache.get(name);
  }

  private void checkMigrationDone() throws IOException {
    if (!migrationDone) {
      throw new HBaseIOException("namespace migration is ongoing, modification is disallowed");
    }
  }

  public void addOrUpdateNamespace(NamespaceDescriptor ns) throws IOException {
    checkMigrationDone();
    insertNamespaceToMeta(masterServices.getConnection(), ns);
    cache.put(ns.getName(), ns);
  }

  public static void insertNamespaceToMeta(Connection conn, NamespaceDescriptor ns)
    throws IOException {
    byte[] row = Bytes.toBytes(ns.getName());
    Put put = new Put(row, true).addColumn(HConstants.NAMESPACE_FAMILY,
      HConstants.NAMESPACE_COL_DESC_QUALIFIER,
      ProtobufUtil.toProtoNamespaceDescriptor(ns).toByteArray());
    try (Table table = conn.getTable(TableName.META_TABLE_NAME)) {
      table.put(put);
    }
  }

  public void deleteNamespace(String namespaceName) throws IOException {
    checkMigrationDone();
    Delete d = new Delete(Bytes.toBytes(namespaceName));
    try (Table table = masterServices.getConnection().getTable(TableName.META_TABLE_NAME)) {
      table.delete(d);
    }
    cache.remove(namespaceName);
  }

  public List<NamespaceDescriptor> list() throws IOException {
    return cache.values().stream().collect(Collectors.toList());
  }

  public void validateTableAndRegionCount(NamespaceDescriptor desc) throws IOException {
    if (getMaxRegions(desc) <= 0) {
      throw new ConstraintException(
        "The max region quota for " + desc.getName() + " is less than or equal to zero.");
    }
    if (getMaxTables(desc) <= 0) {
      throw new ConstraintException(
        "The max tables quota for " + desc.getName() + " is less than or equal to zero.");
    }
  }

  public void setMigrationDone() {
    migrationDone = true;
  }

  public static long getMaxTables(NamespaceDescriptor ns) throws IOException {
    String value = ns.getConfigurationValue(KEY_MAX_TABLES);
    long maxTables = 0;
    if (StringUtils.isNotEmpty(value)) {
      try {
        maxTables = Long.parseLong(value);
      } catch (NumberFormatException exp) {
        throw new DoNotRetryIOException("NumberFormatException while getting max tables.", exp);
      }
    } else {
      // The property is not set, so assume it is the max long value.
      maxTables = Long.MAX_VALUE;
    }
    return maxTables;
  }

  public static long getMaxRegions(NamespaceDescriptor ns) throws IOException {
    String value = ns.getConfigurationValue(KEY_MAX_REGIONS);
    long maxRegions = 0;
    if (StringUtils.isNotEmpty(value)) {
      try {
        maxRegions = Long.parseLong(value);
      } catch (NumberFormatException exp) {
        throw new DoNotRetryIOException("NumberFormatException while getting max regions.", exp);
      }
    } else {
      // The property is not set, so assume it is the max long value.
      maxRegions = Long.MAX_VALUE;
    }
    return maxRegions;
  }
}
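
// A hypothetical illustration (not part of this class): the quota keys parsed by getMaxTables and
// getMaxRegions above are ordinary configuration entries on a NamespaceDescriptor, so a caller
// would typically set them like this (the namespace name and values are made up for the example):
//
//   NamespaceDescriptor ns = NamespaceDescriptor.create("example_ns")
//     .addConfiguration(TableNamespaceManager.KEY_MAX_TABLES, "10")
//     .addConfiguration(TableNamespaceManager.KEY_MAX_REGIONS, "100")
//     .build();
//
// validateTableAndRegionCount(ns) then accepts the descriptor because both quotas are positive.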