/**
 *
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.master;

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsAction;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.hbase.ClusterId;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.backup.HFileArchiver;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.RegionInfo;
import org.apache.hadoop.hbase.client.RegionInfoBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.exceptions.DeserializationException;
import org.apache.hadoop.hbase.fs.HFileSystem;
import org.apache.hadoop.hbase.log.HBaseMarkers;
import org.apache.hadoop.hbase.mob.MobConstants;
import org.apache.hadoop.hbase.regionserver.HRegion;
import org.apache.hadoop.hbase.security.access.SnapshotScannerHDFSAclHelper;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.CommonFSUtils;
import org.apache.hadoop.hbase.util.FSTableDescriptors;
import org.apache.hadoop.hbase.util.FSUtils;
import org.apache.hadoop.ipc.RemoteException;
import org.apache.yetus.audience.InterfaceAudience;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting;
/**
 * This class abstracts the operations the HMaster needs to perform against the underlying
 * file system, such as creating the initial layout and checking file system status.
 */
@InterfaceAudience.Private
public class MasterFileSystem {
  private static final Logger LOG = LoggerFactory.getLogger(MasterFileSystem.class);

  /** Parameter name for HBase instance root directory permission */
  public static final String HBASE_DIR_PERMS = "hbase.rootdir.perms";

  /** Parameter name for HBase WAL directory permission */
  public static final String HBASE_WAL_DIR_PERMS = "hbase.wal.dir.perms";

  // HBase configuration
  private final Configuration conf;
  // Persisted unique cluster ID
  private ClusterId clusterId;
  // Keep around for convenience.
  private final FileSystem fs;
  // Keep around for convenience.
  private final FileSystem walFs;
  // root hbase directory on the FS
  private final Path rootdir;
  // hbase temp directory used for table construction and deletion
  private final Path tempdir;
  // root WAL directory on the FS
  private final Path walRootDir;

  /*
   * In a secure env, the protected sub-directories and files under the HBase rootDir
   * would be restricted. The sub-directories will have '700' except the bulk load staging dir,
   * which will have '711'. The default '700' can be overridden by setting the property
   * 'hbase.rootdir.perms'. The protected files (version file, clusterId file) will have '600'.
   * The rootDir itself will be created with HDFS default permissions if it does not exist.
   * We will check the rootDir permissions to make sure it has 'x' for all to ensure access
   * to the staging dir. If it does not, we will add it.
   */
  // Permissions for the directories under rootDir that need protection
  private final FsPermission secureRootSubDirPerms;
  // Permissions for the files under rootDir that need protection
  private final FsPermission secureRootFilePerms = new FsPermission("600");
  // Permissions for bulk load staging directory under rootDir
  private final FsPermission HiddenDirPerms = FsPermission.valueOf("-rwx--x--x");

  private boolean isSecurityEnabled;

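  /**
   * Resolves the root and WAL filesystems from the given configuration and creates the
   * initial directory layout under them.
   */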
  public MasterFileSystem(Configuration conf) throws IOException {
    this.conf = conf;
    // Set filesystem to be that of this.rootdir, else we get complaints about
    // mismatched filesystems if hbase.rootdir is hdfs and fs.defaultFS is
    // default localfs. Presumption is that rootdir is fully-qualified before
    // we get to here with the appropriate fs scheme.
    this.rootdir = CommonFSUtils.getRootDir(conf);
    this.tempdir = new Path(this.rootdir, HConstants.HBASE_TEMP_DIRECTORY);
    // Cover both bases, the old way of setting default fs and the new.
    // We're supposed to run on 0.20 and 0.21 anyway.
    this.fs = this.rootdir.getFileSystem(conf);
    this.walRootDir = CommonFSUtils.getWALRootDir(conf);
    this.walFs = CommonFSUtils.getWALFileSystem(conf);
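    // Temporarily point fs.defaultFS at the WAL filesystem while handing it this conf,
    // then restore it to the root filesystem below.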
    CommonFSUtils.setFsDefault(conf, new Path(this.walFs.getUri()));
    walFs.setConf(conf);
    CommonFSUtils.setFsDefault(conf, new Path(this.fs.getUri()));
    // make sure the fs has the same conf
    fs.setConf(conf);
    this.secureRootSubDirPerms = new FsPermission(conf.get("hbase.rootdir.perms", "700"));
    this.isSecurityEnabled = "kerberos".equalsIgnoreCase(conf.get("hbase.security.authentication"));
    // set up the initial filesystem layout
    createInitialFileSystemLayout();
    HFileSystem.addLocationsOrderInterceptor(conf);
  }

  /**
   * Create the initial layout in the filesystem.
   * <ol>
   * <li>Check if the meta region exists and is readable; if not, create it.
   * Create the hbase.version file and the hbase:meta directory if they do not already exist.
   * </li>
   * </ol>
   * Idempotent.
   */
  private void createInitialFileSystemLayout() throws IOException {

    final String[] protectedSubDirs = new String[] {
        HConstants.BASE_NAMESPACE_DIR,
        HConstants.HFILE_ARCHIVE_DIRECTORY,
        HConstants.HBCK_SIDELINEDIR_NAME,
        MobConstants.MOB_DIR_NAME
    };

    // With the introduction of RegionProcedureStore, there's no need to create the
    // MasterProcWAL dir here anymore. See HBASE-23715.
    final String[] protectedSubLogDirs = new String[] {
      HConstants.HREGION_LOGDIR_NAME,
      HConstants.HREGION_OLDLOGDIR_NAME,
      HConstants.CORRUPT_DIR_NAME
    };
    // check if the root directory exists
    checkRootDir(this.rootdir, conf, this.fs);

    // Check the directories under rootdir.
    checkTempDir(this.tempdir, conf, this.fs);
    for (String subDir : protectedSubDirs) {
      checkSubDir(new Path(this.rootdir, subDir), HBASE_DIR_PERMS);
    }

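    // Use the WAL-specific permission property only when the WAL root lives outside
    // hbase.rootdir; otherwise the rootdir permission property applies.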
    final String perms;
    if (!this.walRootDir.equals(this.rootdir)) {
      perms = HBASE_WAL_DIR_PERMS;
    } else {
      perms = HBASE_DIR_PERMS;
    }
    for (String subDir : protectedSubLogDirs) {
      checkSubDir(new Path(this.walRootDir, subDir), perms);
    }

    checkStagingDir();

    // Handle the last few special files and set the final rootDir permissions
    // rootDir needs 'x' for all to support the bulk load staging dir
    if (isSecurityEnabled) {
      fs.setPermission(new Path(rootdir, HConstants.VERSION_FILE_NAME), secureRootFilePerms);
      fs.setPermission(new Path(rootdir, HConstants.CLUSTER_ID_FILE_NAME), secureRootFilePerms);
    }
    FsPermission currentRootPerms = fs.getFileStatus(this.rootdir).getPermission();
    if (!currentRootPerms.getUserAction().implies(FsAction.EXECUTE)
        || !currentRootPerms.getGroupAction().implies(FsAction.EXECUTE)
        || !currentRootPerms.getOtherAction().implies(FsAction.EXECUTE)) {
      LOG.warn("rootdir permissions do not contain 'execute' for user, group or other. "
        + "Automatically adding 'execute' permission for all");
      fs.setPermission(this.rootdir,
        new FsPermission(currentRootPerms.getUserAction().or(FsAction.EXECUTE),
          currentRootPerms.getGroupAction().or(FsAction.EXECUTE),
          currentRootPerms.getOtherAction().or(FsAction.EXECUTE)));
    }
  }

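  /**
   * @return the filesystem of the HBase root directory.
   */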
  public FileSystem getFileSystem() {
    return this.fs;
  }

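  /**
   * @return the filesystem of the WAL root directory, which may differ from the root filesystem.
   */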
  public FileSystem getWALFileSystem() {
    return this.walFs;
  }

  public Configuration getConfiguration() {
    return this.conf;
  }

  /**
   * @return HBase root dir.
   */
  public Path getRootDir() {
    return this.rootdir;
  }

  /**
   * @return HBase root log dir.
   */
  public Path getWALRootDir() {
    return this.walRootDir;
  }

  /**
   * @return the directory for a given {@code region}.
   */
  public Path getRegionDir(RegionInfo region) {
    return FSUtils.getRegionDirFromRootDir(getRootDir(), region);
  }

  /**
   * @return HBase temp dir.
   */
  public Path getTempDir() {
    return this.tempdir;
  }

  /**
   * @return The unique identifier generated for this cluster
   */
  public ClusterId getClusterId() {
    return clusterId;
  }

  /**
   * Get the rootdir. Make sure it is wholesome and exists before returning.
   * @param rd the root directory path
   * @param c the cluster configuration
   * @param fs the filesystem the root directory lives on
   * @return hbase.rootdir (after checks for existence and bootstrapping if needed, populating
   * the directory with the necessary bootup files).
   * @throws IOException if the root directory cannot be checked or created
   */
  private Path checkRootDir(final Path rd, final Configuration c, final FileSystem fs)
      throws IOException {
    // If FS is in safe mode wait till out of it.
    FSUtils.waitOnSafeMode(c, c.getInt(HConstants.THREAD_WAKE_FREQUENCY, 10 * 1000));

    // Filesystem is good. Go ahead and check for hbase.rootdir.
    try {
      if (!fs.exists(rd)) {
        fs.mkdirs(rd);
        // DFS leaves safe mode with 0 DNs when there are 0 blocks.
        // We used to handle this by checking the current DN count and waiting until
        // it is nonzero. With security, the check for datanode count doesn't work --
        // it is a privileged op. So instead we adopt the strategy of the jobtracker
        // and simply retry file creation during bootstrap indefinitely. As soon as
        // there is one datanode it will succeed. Permission problems should have
        // already been caught by mkdirs above.
        FSUtils.setVersion(fs, rd, c.getInt(HConstants.THREAD_WAKE_FREQUENCY,
          10 * 1000), c.getInt(HConstants.VERSION_FILE_WRITE_ATTEMPTS,
            HConstants.DEFAULT_VERSION_FILE_WRITE_ATTEMPTS));
      } else {
        if (!fs.isDirectory(rd)) {
          throw new IllegalArgumentException(rd.toString() + " is not a directory");
        }
        // as above
        FSUtils.checkVersion(fs, rd, true, c.getInt(HConstants.THREAD_WAKE_FREQUENCY,
          10 * 1000), c.getInt(HConstants.VERSION_FILE_WRITE_ATTEMPTS,
            HConstants.DEFAULT_VERSION_FILE_WRITE_ATTEMPTS));
      }
    } catch (DeserializationException de) {
      LOG.error(HBaseMarkers.FATAL, "Please fix invalid configuration for "
        + HConstants.HBASE_DIR, de);
      throw new IOException(de);
    } catch (IllegalArgumentException iae) {
      LOG.error(HBaseMarkers.FATAL, "Please fix invalid configuration for "
        + HConstants.HBASE_DIR + " " + rd.toString(), iae);
      throw iae;
    }
    // Make sure cluster ID exists
    if (!FSUtils.checkClusterIdExists(fs, rd, c.getInt(
        HConstants.THREAD_WAKE_FREQUENCY, 10 * 1000))) {
      FSUtils.setClusterId(fs, rd, new ClusterId(),
        c.getInt(HConstants.THREAD_WAKE_FREQUENCY, 10 * 1000));
    }
    clusterId = FSUtils.getClusterId(fs, rd);

    // Make sure the meta region directory exists!
    if (!FSUtils.metaRegionExists(fs, rd)) {
      bootstrap(rd, c);
    }

    // Create tableinfo-s for hbase:meta if not already there.
    // Assume the created table descriptor is for an enabled table.
    // hbase:meta is a system table, so its descriptor is predefined;
    // we should get it from the registry.
    FSTableDescriptors fsd = new FSTableDescriptors(fs, rd);
    fsd.createTableDescriptor(fsd.get(TableName.META_TABLE_NAME));

    return rd;
  }

  /**
   * Make sure the hbase temp directory exists and is empty.
   * NOTE that this method is only executed once just after the master becomes the active one.
   */
  @VisibleForTesting
  void checkTempDir(final Path tmpdir, final Configuration c, final FileSystem fs)
      throws IOException {
    // If the temp directory exists, clear the content (left over from the previous run)
    if (fs.exists(tmpdir)) {
      // Archive tables in temp; they may be left over from a failed deletion.
      // If not handled here, the cleaner will take care of them.
      for (Path tableDir : FSUtils.getTableDirs(fs, tmpdir)) {
        HFileArchiver.archiveRegions(c, fs, this.rootdir, tableDir,
          FSUtils.getRegionDirs(fs, tableDir));
        if (!FSUtils.getRegionDirs(fs, tableDir).isEmpty()) {
          LOG.warn("Found regions in tmp dir after archiving table regions, {}", tableDir);
        }
      }
      // if acl sync to hdfs is enabled, skip deleting the tmp dir because ACLs are set on it
      if (!SnapshotScannerHDFSAclHelper.isAclSyncToHdfsEnabled(c) && !fs.delete(tmpdir, true)) {
        throw new IOException("Unable to clean the temp directory: " + tmpdir);
      }
    }

    // Create the temp directory
    if (!fs.exists(tmpdir)) {
      if (isSecurityEnabled) {
        if (!fs.mkdirs(tmpdir, secureRootSubDirPerms)) {
          throw new IOException("HBase temp directory '" + tmpdir + "' creation failure.");
        }
      } else {
        if (!fs.mkdirs(tmpdir)) {
          throw new IOException("HBase temp directory '" + tmpdir + "' creation failure.");
        }
      }
    }
  }

  /**
   * Make sure the directories under rootDir have good permissions. Create if necessary.
   * @param p the directory to check
   * @param dirPermsConfName the configuration property that specifies the expected permissions
   * @throws IOException if the directory cannot be created or its permissions cannot be set
   */
  private void checkSubDir(final Path p, final String dirPermsConfName) throws IOException {
    FileSystem fs = p.getFileSystem(conf);
    FsPermission dirPerms = new FsPermission(conf.get(dirPermsConfName, "700"));
    if (!fs.exists(p)) {
      if (isSecurityEnabled) {
        if (!fs.mkdirs(p, secureRootSubDirPerms)) {
          throw new IOException("HBase directory '" + p + "' creation failure.");
        }
      } else {
        if (!fs.mkdirs(p)) {
          throw new IOException("HBase directory '" + p + "' creation failure.");
        }
      }
    } else {
      if (isSecurityEnabled && !dirPerms.equals(fs.getFileStatus(p).getPermission())) {
        // check whether the permissions match
        LOG.warn("Found HBase directory permissions NOT matching expected permissions for "
            + p.toString() + " permissions=" + fs.getFileStatus(p).getPermission()
            + ", expecting " + dirPerms + ". Automatically setting the permissions. "
            + "You can change the permissions by setting \"" + dirPermsConfName
            + "\" in hbase-site.xml and restarting the master");
        fs.setPermission(p, dirPerms);
      }
    }
  }

  /**
   * Check permissions for the bulk load staging directory. This directory has special hidden
   * permissions. Create it if necessary.
   * @throws IOException if the directory cannot be created or its permissions cannot be set
   */
  private void checkStagingDir() throws IOException {
    Path p = new Path(this.rootdir, HConstants.BULKLOAD_STAGING_DIR_NAME);
    try {
      if (!this.fs.exists(p)) {
        if (!this.fs.mkdirs(p, HiddenDirPerms)) {
          throw new IOException("Failed to create staging directory " + p.toString());
        }
      } else {
        this.fs.setPermission(p, HiddenDirPerms);
      }
    } catch (IOException e) {
      LOG.error("Failed to create or set permission on staging directory " + p.toString());
      throw new IOException("Failed to create or set permission on staging directory "
          + p.toString(), e);
    }
  }

  private static void bootstrap(final Path rd, final Configuration c) throws IOException {
    LOG.info("BOOTSTRAP: creating hbase:meta region");
    try {
      // Bootstrapping, make sure blockcache is off. Else, one will be
      // created here in bootstrap and it'll need to be cleaned up. Better to
      // not make it in the first place. Turn off block caching for bootstrap.
      // Enable after.
      FSTableDescriptors.tryUpdateMetaTableDescriptor(c);
      TableDescriptor metaDescriptor = new FSTableDescriptors(c).get(TableName.META_TABLE_NAME);
      HRegion meta = HRegion.createHRegion(RegionInfoBuilder.FIRST_META_REGIONINFO, rd,
          c, setInfoFamilyCachingForMeta(metaDescriptor, false), null);
      meta.close();
    } catch (IOException e) {
      e = e instanceof RemoteException ? ((RemoteException) e).unwrapRemoteException() : e;
      LOG.error("bootstrap", e);
      throw e;
    }
  }

  /**
   * Enable or disable the block cache and in-memory caching for the info family of hbase:meta,
   * according to {@code b}.
   */
  public static TableDescriptor setInfoFamilyCachingForMeta(TableDescriptor metaDescriptor,
      final boolean b) {
    TableDescriptorBuilder builder = TableDescriptorBuilder.newBuilder(metaDescriptor);
    for (ColumnFamilyDescriptor hcd : metaDescriptor.getColumnFamilies()) {
      if (Bytes.equals(hcd.getName(), HConstants.CATALOG_FAMILY)) {
        builder.modifyColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(hcd)
                .setBlockCacheEnabled(b)
                .setInMemory(b)
                .build());
      }
    }
    return builder.build();
  }

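  /**
   * Archive the store files of the given column family under the cluster root dir and then
   * delete the family directory from the filesystem.
   */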
  public void deleteFamilyFromFS(RegionInfo region, byte[] familyName)
      throws IOException {
    deleteFamilyFromFS(rootdir, region, familyName);
  }

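  /**
   * Same as {@link #deleteFamilyFromFS(RegionInfo, byte[])} but operates under the supplied
   * {@code rootDir} instead of the cluster root dir.
   */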
  public void deleteFamilyFromFS(Path rootDir, RegionInfo region, byte[] familyName)
      throws IOException {
    // archive family store files
    Path tableDir = CommonFSUtils.getTableDir(rootDir, region.getTable());
    HFileArchiver.archiveFamily(fs, conf, region, tableDir, familyName);

    // delete the family folder
    Path familyDir = new Path(tableDir,
      new Path(region.getEncodedName(), Bytes.toString(familyName)));
    if (!fs.delete(familyDir, true)) {
      if (fs.exists(familyDir)) {
        throw new IOException("Could not delete family "
            + Bytes.toString(familyName) + " from FileSystem for region "
            + region.getRegionNameAsString() + "(" + region.getEncodedName()
            + ")");
      }
    }
  }

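  /**
   * Stop this instance. Currently a no-op.
   */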
  public void stop() {
  }

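  /**
   * Log the state of the filesystem under the HBase root directory to the given logger,
   * for debugging.
   */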
  public void logFileSystemState(Logger log) throws IOException {
    CommonFSUtils.logFileSystemState(fs, rootdir, log);
  }
}