/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.master;

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsAction;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.hbase.ClusterId;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.backup.HFileArchiver;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.RegionInfo;
import org.apache.hadoop.hbase.client.RegionInfoBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.exceptions.DeserializationException;
import org.apache.hadoop.hbase.fs.HFileSystem;
import org.apache.hadoop.hbase.log.HBaseMarkers;
import org.apache.hadoop.hbase.mob.MobConstants;
import org.apache.hadoop.hbase.procedure2.store.wal.WALProcedureStore;
import org.apache.hadoop.hbase.regionserver.HRegion;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.FSTableDescriptors;
import org.apache.hadoop.hbase.util.FSUtils;
import org.apache.hadoop.ipc.RemoteException;
import org.apache.yetus.audience.InterfaceAudience;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

/**
 * This class abstracts a bunch of operations the HMaster needs to interact with
 * the underlying file system like creating the initial layout, checking file
 * system status, etc.
 */
@InterfaceAudience.Private
public class MasterFileSystem {
  private static final Logger LOG = LoggerFactory.getLogger(MasterFileSystem.class);

  /** Parameter name for HBase instance root directory permission */
  public static final String HBASE_DIR_PERMS = "hbase.rootdir.perms";

  /** Parameter name for HBase WAL directory permission */
  public static final String HBASE_WAL_DIR_PERMS = "hbase.wal.dir.perms";

  // HBase configuration
  private final Configuration conf;
  // Persisted unique cluster ID
  private ClusterId clusterId;
  // Keep around for convenience.
  private final FileSystem fs;
  // Keep around for convenience.
  private final FileSystem walFs;
  // root hbase directory on the FS
  private final Path rootdir;
  // hbase temp directory used for table construction and deletion
  private final Path tempdir;
  // root WAL (log) directory on the FS
  private final Path walRootDir;

  /*
   * In a secure env, the protected sub-directories and files under the HBase rootDir
   * would be restricted. The sub-directories will have '700' except the bulk load staging dir,
   * which will have '711'. The default '700' can be overridden by setting the property
   * 'hbase.rootdir.perms'. The protected files (version file, clusterId file) will have '600'.
   * The rootDir itself will be created with HDFS default permissions if it does not exist.
   * We will check the rootDir permissions to make sure it has 'x' for all to ensure access
   * to the staging dir. If it does not, we will add it.
   */
  // Permissions for the directories under rootDir that need protection
  private final FsPermission secureRootSubDirPerms;
  // Permissions for the files under rootDir that need protection
  private final FsPermission secureRootFilePerms = new FsPermission("600");
  // Permissions for the bulk load staging directory under rootDir
  private final FsPermission HiddenDirPerms = FsPermission.valueOf("-rwx--x--x");

  private boolean isSecurityEnabled;
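  /**
   * Create a MasterFileSystem from the given configuration, verifying and, if needed,
   * bootstrapping the on-disk layout as a side effect. A minimal usage sketch (the
   * fully-qualified rootdir URI below is a placeholder):
   * <pre>{@code
   * Configuration conf = HBaseConfiguration.create();
   * conf.set(HConstants.HBASE_DIR, "hdfs://namenode:8020/hbase");
   * MasterFileSystem mfs = new MasterFileSystem(conf);
   * Path root = mfs.getRootDir();       // hdfs://namenode:8020/hbase
   * Path walRoot = mfs.getWALRootDir(); // same as root unless a separate WAL dir is set
   * }</pre>
   */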
  public MasterFileSystem(Configuration conf) throws IOException {
    this.conf = conf;
    // Set filesystem to be that of this.rootdir else we get complaints about
    // mismatched filesystems if hbase.rootdir is hdfs and fs.defaultFS is
    // default localfs. Presumption is that rootdir is fully-qualified before
    // we get to here with appropriate fs scheme.
    this.rootdir = FSUtils.getRootDir(conf);
    this.tempdir = new Path(this.rootdir, HConstants.HBASE_TEMP_DIRECTORY);
    // Cover both bases, the old way of setting default fs and the new.
    this.fs = this.rootdir.getFileSystem(conf);
    this.walRootDir = FSUtils.getWALRootDir(conf);
    this.walFs = FSUtils.getWALFileSystem(conf);
    FSUtils.setFsDefault(conf, new Path(this.walFs.getUri()));
    walFs.setConf(conf);
    FSUtils.setFsDefault(conf, new Path(this.fs.getUri()));
    // make sure the fs has the same conf
    fs.setConf(conf);
    this.secureRootSubDirPerms = new FsPermission(conf.get("hbase.rootdir.perms", "700"));
    this.isSecurityEnabled = "kerberos".equalsIgnoreCase(conf.get("hbase.security.authentication"));
    // set up the filesystem layout
    createInitialFileSystemLayout();
    HFileSystem.addLocationsOrderInterceptor(conf);
  }

  /**
   * Create the initial layout in the filesystem.
   * <ol>
   * <li>Check that the hbase:meta region exists and is readable; if not, create it,
   * along with the hbase.version file and the hbase:meta directory.</li>
   * </ol>
   * Idempotent.
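   * <p>As a rough sketch (directory names are the usual defaults resolved from the
   * constants this method uses; they may differ in other versions), the layout that
   * is verified or created looks like:</p>
   * <pre>
   * hbase.rootdir/
   *   .tmp/            temp dir for table creation and deletion
   *   data/            namespaces and tables
   *   archive/         HFile archive
   *   .hbck/           data sidelined by hbck
   *   mobdir/          MOB files
   *   staging/         bulk load staging dir, mode 711
   *   hbase.version    protected file, mode 600 when secure
   *   hbase.id         protected file, mode 600 when secure
   * hbase WAL root dir/
   *   WALs/  oldWALs/  corrupt/  MasterProcWALs/
   * </pre>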
   */
  private void createInitialFileSystemLayout() throws IOException {

    final String[] protectedSubDirs = new String[] {
        HConstants.BASE_NAMESPACE_DIR,
        HConstants.HFILE_ARCHIVE_DIRECTORY,
        HConstants.HBCK_SIDELINEDIR_NAME,
        MobConstants.MOB_DIR_NAME
    };

    final String[] protectedSubLogDirs = new String[] {
        HConstants.HREGION_LOGDIR_NAME,
        HConstants.HREGION_OLDLOGDIR_NAME,
        HConstants.CORRUPT_DIR_NAME,
        WALProcedureStore.MASTER_PROCEDURE_LOGDIR
    };
    // check if the root directory exists
    checkRootDir(this.rootdir, conf, this.fs);

    // Check the directories under rootdir.
    checkTempDir(this.tempdir, conf, this.fs);
    for (String subDir : protectedSubDirs) {
      checkSubDir(new Path(this.rootdir, subDir), HBASE_DIR_PERMS);
    }

    final String perms;
    if (!this.walRootDir.equals(this.rootdir)) {
      perms = HBASE_WAL_DIR_PERMS;
    } else {
      perms = HBASE_DIR_PERMS;
    }
    for (String subDir : protectedSubLogDirs) {
      checkSubDir(new Path(this.walRootDir, subDir), perms);
    }

    checkStagingDir();

    // Handle the last few special files and set the final rootDir permissions.
    // rootDir needs 'x' for all to support the bulk load staging dir.
    if (isSecurityEnabled) {
      fs.setPermission(new Path(rootdir, HConstants.VERSION_FILE_NAME), secureRootFilePerms);
      fs.setPermission(new Path(rootdir, HConstants.CLUSTER_ID_FILE_NAME), secureRootFilePerms);
    }
    FsPermission currentRootPerms = fs.getFileStatus(this.rootdir).getPermission();
    if (!currentRootPerms.getUserAction().implies(FsAction.EXECUTE)
        || !currentRootPerms.getGroupAction().implies(FsAction.EXECUTE)
        || !currentRootPerms.getOtherAction().implies(FsAction.EXECUTE)) {
      LOG.warn("rootdir permissions do not contain 'execute' for user, group or other. "
          + "Automatically adding 'execute' permission for all");
      fs.setPermission(
          this.rootdir,
          new FsPermission(currentRootPerms.getUserAction().or(FsAction.EXECUTE),
              currentRootPerms.getGroupAction().or(FsAction.EXECUTE),
              currentRootPerms.getOtherAction().or(FsAction.EXECUTE)));
    }
  }

  public FileSystem getFileSystem() {
    return this.fs;
  }

  public FileSystem getWALFileSystem() {
    return this.walFs;
  }

  public Configuration getConfiguration() {
    return this.conf;
  }

  /**
   * @return HBase root directory.
   */
  public Path getRootDir() {
    return this.rootdir;
  }

  /**
   * Returns the HBase WAL root directory, usually the same as {@link #getRootDir()} but
   * possibly different if HFiles are on one filesystem and WALs on another. The 'WALs'
   * dir gets made underneath the root dir returned here; i.e. this is '/hbase', not
   * '/hbase/WALs'.
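   * <p>For example, a deployment keeping WALs on separate storage might configure
   * something like the following (URIs are placeholders; {@code hbase.wal.dir} is
   * assumed to be the WAL root configuration key):</p>
   * <pre>{@code
   * conf.set(HConstants.HBASE_DIR, "hdfs://bulk-cluster:8020/hbase");
   * conf.set("hbase.wal.dir", "hdfs://fast-cluster:8020/hbase-wal");
   * }</pre>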
   * @return HBase WAL root directory
   */
  public Path getWALRootDir() {
    return this.walRootDir;
  }

  /**
   * @return HBase temp dir.
   */
  public Path getTempDir() {
    return this.tempdir;
  }

  /**
   * @return The unique identifier generated for this cluster
   */
  public ClusterId getClusterId() {
    return clusterId;
  }

  /**
   * Get the rootdir. Make sure it is wholesome and exists before returning.
   * @param rd the root directory path
   * @param c configuration to read wait frequencies and retry counts from
   * @param fs filesystem the root directory lives on
   * @return hbase.rootdir (after checks for existence and bootstrapping if
   * needed, populating the directory with necessary bootup files).
   */
  private Path checkRootDir(final Path rd, final Configuration c, final FileSystem fs)
      throws IOException {
    // If FS is in safe mode wait till out of it.
    FSUtils.waitOnSafeMode(c, c.getInt(HConstants.THREAD_WAKE_FREQUENCY, 10 * 1000));

    // Filesystem is good. Go ahead and check for hbase.rootdir.
    try {
      if (!fs.exists(rd)) {
        fs.mkdirs(rd);
        // DFS leaves safe mode with 0 DNs when there are 0 blocks.
        // We used to handle this by checking the current DN count and waiting until
        // it is nonzero. With security, the check for datanode count doesn't work --
        // it is a privileged op. So instead we adopt the strategy of the jobtracker
        // and simply retry file creation during bootstrap indefinitely. As soon as
        // there is one datanode it will succeed. Permission problems should have
        // already been caught by mkdirs above.
        FSUtils.setVersion(fs, rd, c.getInt(HConstants.THREAD_WAKE_FREQUENCY, 10 * 1000),
            c.getInt(HConstants.VERSION_FILE_WRITE_ATTEMPTS,
                HConstants.DEFAULT_VERSION_FILE_WRITE_ATTEMPTS));
      } else {
        if (!fs.isDirectory(rd)) {
          throw new IllegalArgumentException(rd.toString() + " is not a directory");
        }
        // as above
        FSUtils.checkVersion(fs, rd, true, c.getInt(HConstants.THREAD_WAKE_FREQUENCY, 10 * 1000),
            c.getInt(HConstants.VERSION_FILE_WRITE_ATTEMPTS,
                HConstants.DEFAULT_VERSION_FILE_WRITE_ATTEMPTS));
      }
    } catch (DeserializationException de) {
      LOG.error(HBaseMarkers.FATAL, "Please fix invalid configuration for "
          + HConstants.HBASE_DIR, de);
      throw new IOException(de);
    } catch (IllegalArgumentException iae) {
      LOG.error(HBaseMarkers.FATAL, "Please fix invalid configuration for "
          + HConstants.HBASE_DIR + " " + rd.toString(), iae);
      throw iae;
    }
    // Make sure cluster ID exists
    if (!FSUtils.checkClusterIdExists(fs, rd,
        c.getInt(HConstants.THREAD_WAKE_FREQUENCY, 10 * 1000))) {
      FSUtils.setClusterId(fs, rd, new ClusterId(),
          c.getInt(HConstants.THREAD_WAKE_FREQUENCY, 10 * 1000));
    }
    clusterId = FSUtils.getClusterId(fs, rd);

    // Make sure the meta region directory exists!
    if (!FSUtils.metaRegionExists(fs, rd)) {
      bootstrap(rd, c);
    }

    // Create tableinfo-s for hbase:meta if not already there.
    // hbase:meta is a system table, so its descriptor is predefined;
    // get it from the FSTableDescriptors registry.
    FSTableDescriptors fsd = new FSTableDescriptors(c, fs, rd);
    fsd.createTableDescriptor(fsd.get(TableName.META_TABLE_NAME));

    return rd;
  }

  /**
   * Make sure the hbase temp directory exists and is empty.
   * NOTE that this method is only executed once just after the master becomes the active one.
   */
  private void checkTempDir(final Path tmpdir, final Configuration c, final FileSystem fs)
      throws IOException {
    // If the temp directory exists, clear the content (left over from the previous run)
    if (fs.exists(tmpdir)) {
      // Archive table directories in temp, maybe left over from a failed deletion;
      // if not, the cleaner will take care of them.
      for (Path tabledir : FSUtils.getTableDirs(fs, tmpdir)) {
        for (Path regiondir : FSUtils.getRegionDirs(fs, tabledir)) {
          HFileArchiver.archiveRegion(fs, this.rootdir, tabledir, regiondir);
        }
      }
      if (!fs.delete(tmpdir, true)) {
        throw new IOException("Unable to clean the temp directory: " + tmpdir);
      }
    }

    // Create the temp directory
    if (isSecurityEnabled) {
      if (!fs.mkdirs(tmpdir, secureRootSubDirPerms)) {
        throw new IOException("HBase temp directory '" + tmpdir + "' creation failure.");
      }
    } else {
      if (!fs.mkdirs(tmpdir)) {
        throw new IOException("HBase temp directory '" + tmpdir + "' creation failure.");
      }
    }
  }

  /**
   * Make sure the directories under rootDir have good permissions. Create them if necessary.
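   * <p>For example, to override the default '700' on the protected sub-directories,
   * hbase-site.xml could carry (the value is illustrative):</p>
   * <pre>{@code
   * <property>
   *   <name>hbase.rootdir.perms</name>
   *   <value>750</value>
   * </property>
   * }</pre>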
   * @param p path of the directory to check
   * @param dirPermsConfName configuration property naming the expected permissions
   * @throws IOException if the directory cannot be created or its permissions set
   */
  private void checkSubDir(final Path p, final String dirPermsConfName) throws IOException {
    FileSystem fs = p.getFileSystem(conf);
    FsPermission dirPerms = new FsPermission(conf.get(dirPermsConfName, "700"));
    if (!fs.exists(p)) {
      if (isSecurityEnabled) {
        if (!fs.mkdirs(p, secureRootSubDirPerms)) {
          throw new IOException("HBase directory '" + p + "' creation failure.");
        }
      } else {
        if (!fs.mkdirs(p)) {
          throw new IOException("HBase directory '" + p + "' creation failure.");
        }
      }
    } else {
      // Check whether the permissions match the expectation.
      if (isSecurityEnabled && !dirPerms.equals(fs.getFileStatus(p).getPermission())) {
        LOG.warn("Found HBase directory permissions NOT matching expected permissions for "
            + p.toString() + " permissions=" + fs.getFileStatus(p).getPermission()
            + ", expecting " + dirPerms + ". Automatically setting the permissions. "
            + "You can change the permissions by setting \"" + dirPermsConfName
            + "\" in hbase-site.xml and restarting the master");
        fs.setPermission(p, dirPerms);
      }
    }
  }

  /**
   * Check permissions for the bulk load staging directory. This directory has special
   * hidden permissions. Create it if necessary.
   * @throws IOException if the directory cannot be created or its permissions set
   */
  private void checkStagingDir() throws IOException {
    Path p = new Path(this.rootdir, HConstants.BULKLOAD_STAGING_DIR_NAME);
    try {
      if (!this.fs.exists(p)) {
        if (!this.fs.mkdirs(p, HiddenDirPerms)) {
          throw new IOException("Failed to create staging directory " + p.toString());
        }
      } else {
        this.fs.setPermission(p, HiddenDirPerms);
      }
    } catch (IOException e) {
      LOG.error("Failed to create or set permission on staging directory " + p.toString());
      throw new IOException("Failed to create or set permission on staging directory "
          + p.toString(), e);
    }
  }

  private static void bootstrap(final Path rd, final Configuration c)
      throws IOException {
    LOG.info("BOOTSTRAP: creating hbase:meta region");
    try {
      // Bootstrapping, make sure blockcache is off. Else, one will be
      // created here in bootstrap and it'll need to be cleaned up. Better to
      // not make it in the first place. Turn off block caching for bootstrap.
      // Enable after.
      TableDescriptor metaDescriptor = new FSTableDescriptors(c).get(TableName.META_TABLE_NAME);
      HRegion meta = HRegion.createHRegion(RegionInfoBuilder.FIRST_META_REGIONINFO, rd,
          c, setInfoFamilyCachingForMeta(metaDescriptor, false), null);
      meta.close();
    } catch (IOException e) {
      e = e instanceof RemoteException ? ((RemoteException) e).unwrapRemoteException() : e;
      LOG.error("bootstrap", e);
      throw e;
    }
  }

  /**
   * Enable or disable in-memory caching and block caching for the info family of hbase:meta.
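   * <p>A minimal usage sketch (mirroring the call in {@code bootstrap}; the
   * {@code metaDescriptor} variable is assumed to already hold the hbase:meta
   * table descriptor):</p>
   * <pre>{@code
   * // Turn caching off while bootstrapping; turn it back on afterwards.
   * TableDescriptor noCache = setInfoFamilyCachingForMeta(metaDescriptor, false);
   * TableDescriptor cached = setInfoFamilyCachingForMeta(metaDescriptor, true);
   * }</pre>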
   */
  public static TableDescriptor setInfoFamilyCachingForMeta(TableDescriptor metaDescriptor,
      final boolean b) {
    TableDescriptorBuilder builder = TableDescriptorBuilder.newBuilder(metaDescriptor);
    for (ColumnFamilyDescriptor hcd : metaDescriptor.getColumnFamilies()) {
      if (Bytes.equals(hcd.getName(), HConstants.CATALOG_FAMILY)) {
        builder.modifyColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(hcd)
            .setBlockCacheEnabled(b)
            .setInMemory(b)
            .build());
      }
    }
    return builder.build();
  }

  public void deleteFamilyFromFS(RegionInfo region, byte[] familyName)
      throws IOException {
    deleteFamilyFromFS(rootdir, region, familyName);
  }

  public void deleteFamilyFromFS(Path rootDir, RegionInfo region, byte[] familyName)
      throws IOException {
    // archive family store files
    Path tableDir = FSUtils.getTableDir(rootDir, region.getTable());
    HFileArchiver.archiveFamily(fs, conf, region, tableDir, familyName);

    // delete the family folder
    Path familyDir = new Path(tableDir,
        new Path(region.getEncodedName(), Bytes.toString(familyName)));
    if (!fs.delete(familyDir, true) && fs.exists(familyDir)) {
      throw new IOException("Could not delete family "
          + Bytes.toString(familyName) + " from FileSystem for region "
          + region.getRegionNameAsString() + "(" + region.getEncodedName()
          + ")");
    }
  }

  public void stop() {
  }

  public void logFileSystemState(Logger log) throws IOException {
    FSUtils.logFileSystemState(fs, rootdir, log);
  }
}