/**
 *
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.master;

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsAction;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.hbase.ClusterId;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.backup.HFileArchiver;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.RegionInfo;
import org.apache.hadoop.hbase.client.RegionInfoBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.exceptions.DeserializationException;
import org.apache.hadoop.hbase.fs.HFileSystem;
import org.apache.hadoop.hbase.log.HBaseMarkers;
import org.apache.hadoop.hbase.mob.MobConstants;
import org.apache.hadoop.hbase.procedure2.store.wal.WALProcedureStore;
import org.apache.hadoop.hbase.regionserver.HRegion;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.FSTableDescriptors;
import org.apache.hadoop.hbase.util.FSUtils;
import org.apache.hadoop.ipc.RemoteException;
import org.apache.yetus.audience.InterfaceAudience;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting;

/**
 * This class abstracts the operations the HMaster performs against the
 * underlying file system, such as creating the initial layout and checking
 * file system status.
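 * <p>
 * A minimal usage sketch (assumes a {@code Configuration} whose {@code hbase.rootdir}
 * is fully-qualified; error handling omitted):
 * <pre>
 * Configuration conf = HBaseConfiguration.create();
 * MasterFileSystem mfs = new MasterFileSystem(conf); // creates the layout if absent
 * Path rootDir = mfs.getRootDir();
 * FileSystem fs = mfs.getFileSystem();
 * </pre>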
 */
@InterfaceAudience.Private
public class MasterFileSystem {
  private static final Logger LOG = LoggerFactory.getLogger(MasterFileSystem.class);

  /** Parameter name for HBase instance root directory permission */
  public static final String HBASE_DIR_PERMS = "hbase.rootdir.perms";

  /** Parameter name for HBase WAL directory permission */
  public static final String HBASE_WAL_DIR_PERMS = "hbase.wal.dir.perms";

  // HBase configuration
  private final Configuration conf;
  // Persisted unique cluster ID
  private ClusterId clusterId;
  // Keep around for convenience.
  private final FileSystem fs;
  // Keep around for convenience.
  private final FileSystem walFs;
  // root hbase directory on the FS
  private final Path rootdir;
  // hbase temp directory used for table construction and deletion
  private final Path tempdir;
  // root WAL directory on the FS
  private final Path walRootDir;

  /*
   * In a secure env, the protected sub-directories and files under the HBase rootDir
   * would be restricted. The sub-directories will have '700' except the bulk load staging dir,
   * which will have '711'. The default '700' can be overridden by setting the property
   * 'hbase.rootdir.perms'. The protected files (version file, clusterId file) will have '600'.
   * The rootDir itself will be created with HDFS default permissions if it does not exist.
   * We will check the rootDir permissions to make sure it has 'x' for all to ensure access
   * to the staging dir. If it does not, we will add it.
   */
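  // For example, with the defaults: the protected sub-dirs get drwx------ (700), the bulk
  // load staging dir gets drwx--x--x (711), and hbase.version/hbase.id get -rw------- (600).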
  // Permissions for the directories under rootDir that need protection
  private final FsPermission secureRootSubDirPerms;
  // Permissions for the files under rootDir that need protection
  private final FsPermission secureRootFilePerms = new FsPermission("600");
  // Permissions for the bulk load staging directory under rootDir
  private final FsPermission hiddenDirPerms = FsPermission.valueOf("-rwx--x--x");

  private boolean isSecurityEnabled;

  public MasterFileSystem(Configuration conf) throws IOException {
    this.conf = conf;
    // Set filesystem to be that of this.rootdir else we get complaints about
    // mismatched filesystems if hbase.rootdir is hdfs and fs.defaultFS is
    // default localfs.  Presumption is that rootdir is fully-qualified before
    // we get to here with appropriate fs scheme.
    this.rootdir = FSUtils.getRootDir(conf);
    this.tempdir = new Path(this.rootdir, HConstants.HBASE_TEMP_DIRECTORY);
    // Cover both bases, the old way of setting default fs and the new.
    this.fs = this.rootdir.getFileSystem(conf);
    this.walRootDir = FSUtils.getWALRootDir(conf);
    this.walFs = FSUtils.getWALFileSystem(conf);
    FSUtils.setFsDefault(conf, new Path(this.walFs.getUri()));
    walFs.setConf(conf);
    FSUtils.setFsDefault(conf, new Path(this.fs.getUri()));
    // make sure the fs has the same conf
    fs.setConf(conf);
    this.secureRootSubDirPerms = new FsPermission(conf.get(HBASE_DIR_PERMS, "700"));
    this.isSecurityEnabled = "kerberos".equalsIgnoreCase(conf.get("hbase.security.authentication"));
    // set up the filesystem layout
    createInitialFileSystemLayout();
    HFileSystem.addLocationsOrderInterceptor(conf);
  }

  /**
   * Create the initial layout in the filesystem.
   * <ol>
   * <li>Check if the meta region exists and is readable; if not, create it.
   * Create the hbase.version file and the hbase:meta region directory if missing.
   * </li>
   * </ol>
   * Idempotent.
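   * <p>
   * A sketch of the resulting layout under a fresh rootdir (directory names come from
   * the constants referenced below; the WAL directories may live under a separate
   * {@code hbase.wal.dir}):
   * <pre>
   * hbase.version  hbase.id           (files; '600' when security is enabled)
   * data/  archive/  .hbck/  mobdir/  (protected sub-dirs; '700' by default)
   * staging/                          ('711', for bulk loads)
   * WALs/  oldWALs/  corrupt/  MasterProcWALs/
   * </pre>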
   */
  private void createInitialFileSystemLayout() throws IOException {

    final String[] protectedSubDirs = new String[] {
        HConstants.BASE_NAMESPACE_DIR,
        HConstants.HFILE_ARCHIVE_DIRECTORY,
        HConstants.HBCK_SIDELINEDIR_NAME,
        MobConstants.MOB_DIR_NAME
    };

    final String[] protectedSubLogDirs = new String[] {
        HConstants.HREGION_LOGDIR_NAME,
        HConstants.HREGION_OLDLOGDIR_NAME,
        HConstants.CORRUPT_DIR_NAME,
        WALProcedureStore.MASTER_PROCEDURE_LOGDIR
    };
    // check if the root directory exists
    checkRootDir(this.rootdir, conf, this.fs);

    // Check the directories under rootdir.
    checkTempDir(this.tempdir, conf, this.fs);
    for (String subDir : protectedSubDirs) {
      checkSubDir(new Path(this.rootdir, subDir), HBASE_DIR_PERMS);
    }

    final String perms;
    if (!this.walRootDir.equals(this.rootdir)) {
      perms = HBASE_WAL_DIR_PERMS;
    } else {
      perms = HBASE_DIR_PERMS;
    }
    for (String subDir : protectedSubLogDirs) {
      checkSubDir(new Path(this.walRootDir, subDir), perms);
    }

    checkStagingDir();

    // Handle the last few special files and set the final rootDir permissions
    // rootDir needs 'x' for all to support the bulk load staging dir
    if (isSecurityEnabled) {
      fs.setPermission(new Path(rootdir, HConstants.VERSION_FILE_NAME), secureRootFilePerms);
      fs.setPermission(new Path(rootdir, HConstants.CLUSTER_ID_FILE_NAME), secureRootFilePerms);
    }
    FsPermission currentRootPerms = fs.getFileStatus(this.rootdir).getPermission();
    if (!currentRootPerms.getUserAction().implies(FsAction.EXECUTE)
        || !currentRootPerms.getGroupAction().implies(FsAction.EXECUTE)
        || !currentRootPerms.getOtherAction().implies(FsAction.EXECUTE)) {
      LOG.warn("rootdir permissions do not contain 'execute' for user, group or other. "
        + "Automatically adding 'execute' permission for all");
      fs.setPermission(
        this.rootdir,
        new FsPermission(currentRootPerms.getUserAction().or(FsAction.EXECUTE),
          currentRootPerms.getGroupAction().or(FsAction.EXECUTE),
          currentRootPerms.getOtherAction().or(FsAction.EXECUTE)));
    }
  }

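  /**
   * @return the filesystem instance backing {@code hbase.rootdir}
   */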
  public FileSystem getFileSystem() {
    return this.fs;
  }

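  /**
   * @return the filesystem instance backing the WAL root dir
   */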
  public FileSystem getWALFileSystem() {
    return this.walFs;
  }

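  /**
   * @return the configuration used to create this instance
   */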
  public Configuration getConfiguration() {
    return this.conf;
  }

  /**
   * @return HBase root dir.
   */
  public Path getRootDir() {
    return this.rootdir;
  }

  /**
   * @return HBase WAL root dir.
   */
  public Path getWALRootDir() {
    return this.walRootDir;
  }

  /**
   * @return the directory for the given {@code region}.
   */
  public Path getRegionDir(RegionInfo region) {
    return FSUtils.getRegionDirFromRootDir(getRootDir(), region);
  }

  /**
   * @return HBase temp dir.
   */
  public Path getTempDir() {
    return this.tempdir;
  }

  /**
   * @return The unique identifier generated for this cluster
   */
  public ClusterId getClusterId() {
    return clusterId;
  }

  /**
   * Get the rootdir. Make sure it is valid and exists before returning.
   * @param rd the root directory path
   * @param c configuration to use
   * @param fs filesystem the root directory lives on
   * @return hbase.rootdir (after checks for existence and bootstrapping if
   * needed, populating the directory with the necessary bootup files).
   * @throws IOException if the root directory cannot be checked or created
   */
  private Path checkRootDir(final Path rd, final Configuration c, final FileSystem fs)
      throws IOException {
    // If FS is in safe mode wait till out of it.
    FSUtils.waitOnSafeMode(c, c.getInt(HConstants.THREAD_WAKE_FREQUENCY, 10 * 1000));

    // Filesystem is good. Go ahead and check for hbase.rootdir.
    try {
      if (!fs.exists(rd)) {
        fs.mkdirs(rd);
        // DFS leaves safe mode with 0 DNs when there are 0 blocks.
        // We used to handle this by checking the current DN count and waiting until
        // it is nonzero. With security, the check for datanode count doesn't work --
        // it is a privileged op. So instead we adopt the strategy of the jobtracker
        // and simply retry file creation during bootstrap indefinitely. As soon as
        // there is one datanode it will succeed. Permission problems should have
        // already been caught by mkdirs above.
        FSUtils.setVersion(fs, rd, c.getInt(HConstants.THREAD_WAKE_FREQUENCY,
          10 * 1000), c.getInt(HConstants.VERSION_FILE_WRITE_ATTEMPTS,
            HConstants.DEFAULT_VERSION_FILE_WRITE_ATTEMPTS));
      } else {
        if (!fs.isDirectory(rd)) {
          throw new IllegalArgumentException(rd.toString() + " is not a directory");
        }
        // as above
        FSUtils.checkVersion(fs, rd, true, c.getInt(HConstants.THREAD_WAKE_FREQUENCY,
          10 * 1000), c.getInt(HConstants.VERSION_FILE_WRITE_ATTEMPTS,
            HConstants.DEFAULT_VERSION_FILE_WRITE_ATTEMPTS));
      }
    } catch (DeserializationException de) {
      LOG.error(HBaseMarkers.FATAL, "Please fix invalid configuration for "
        + HConstants.HBASE_DIR, de);
      IOException ioe = new IOException();
      ioe.initCause(de);
      throw ioe;
    } catch (IllegalArgumentException iae) {
      LOG.error(HBaseMarkers.FATAL, "Please fix invalid configuration for "
        + HConstants.HBASE_DIR + " " + rd.toString(), iae);
      throw iae;
    }
    // Make sure cluster ID exists
    if (!FSUtils.checkClusterIdExists(fs, rd, c.getInt(
        HConstants.THREAD_WAKE_FREQUENCY, 10 * 1000))) {
      FSUtils.setClusterId(fs, rd, new ClusterId(),
        c.getInt(HConstants.THREAD_WAKE_FREQUENCY, 10 * 1000));
    }
    clusterId = FSUtils.getClusterId(fs, rd);

    // Make sure the meta region directory exists!
    if (!FSUtils.metaRegionExists(fs, rd)) {
      bootstrap(rd, c);
    }

    // Create the tableinfo for hbase:meta if not already there. hbase:meta is a
    // system table, so its descriptor is predefined; we fetch it from the
    // FSTableDescriptors registry. The created descriptor assumes the table is enabled.
    FSTableDescriptors fsd = new FSTableDescriptors(c, fs, rd);
    fsd.createTableDescriptor(fsd.get(TableName.META_TABLE_NAME));

    return rd;
  }

  /**
   * Make sure the hbase temp directory exists and is empty.
   * NOTE that this method is only executed once just after the master becomes the active one.
   */
  @VisibleForTesting
  void checkTempDir(final Path tmpdir, final Configuration c, final FileSystem fs)
      throws IOException {
    // If the temp directory exists, clear its content (left over from the previous run)
    if (fs.exists(tmpdir)) {
      // Archive tables in temp, possibly left over from a failed deletion;
      // otherwise the cleaner will take care of them.
      for (Path tableDir: FSUtils.getTableDirs(fs, tmpdir)) {
        HFileArchiver.archiveRegions(c, fs, this.rootdir, tableDir,
          FSUtils.getRegionDirs(fs, tableDir));
      }
      if (!fs.delete(tmpdir, true)) {
        throw new IOException("Unable to clean the temp directory: " + tmpdir);
      }
    }

    // Create the temp directory
    if (isSecurityEnabled) {
      if (!fs.mkdirs(tmpdir, secureRootSubDirPerms)) {
        throw new IOException("HBase temp directory '" + tmpdir + "' creation failure.");
      }
    } else {
      if (!fs.mkdirs(tmpdir)) {
        throw new IOException("HBase temp directory '" + tmpdir + "' creation failure.");
      }
    }
  }

  /**
   * Make sure the directories under rootDir have good permissions. Create them if necessary.
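   * <p>
   * The expected permissions come from configuration. For example, to override the
   * default '700' for the protected sub-directories, one could set the following in
   * hbase-site.xml (a sketch; the property name is the value of {@link #HBASE_DIR_PERMS}):
   * <pre>
   * &lt;property&gt;
   *   &lt;name&gt;hbase.rootdir.perms&lt;/name&gt;
   *   &lt;value&gt;750&lt;/value&gt;
   * &lt;/property&gt;
   * </pre>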
   * @param p path of the directory to check
   * @param dirPermsConfName name of the configuration property holding the expected permissions
   * @throws IOException if the directory cannot be created or its permissions set
   */
  private void checkSubDir(final Path p, final String dirPermsConfName) throws IOException {
    FileSystem fs = p.getFileSystem(conf);
    FsPermission dirPerms = new FsPermission(conf.get(dirPermsConfName, "700"));
    if (!fs.exists(p)) {
      if (isSecurityEnabled) {
        if (!fs.mkdirs(p, secureRootSubDirPerms)) {
          throw new IOException("HBase directory '" + p + "' creation failure.");
        }
      } else {
        if (!fs.mkdirs(p)) {
          throw new IOException("HBase directory '" + p + "' creation failure.");
        }
      }
    } else {
      // check whether the permissions match
      if (isSecurityEnabled && !dirPerms.equals(fs.getFileStatus(p).getPermission())) {
        LOG.warn("Found HBase directory permissions NOT matching expected permissions for "
            + p.toString() + " permissions=" + fs.getFileStatus(p).getPermission()
            + ", expecting " + dirPerms + ". Automatically setting the permissions. "
            + "You can change the permissions by setting \"" + dirPermsConfName
            + "\" in hbase-site.xml and restarting the master");
        fs.setPermission(p, dirPerms);
      }
    }
  }

  /**
   * Check permissions for the bulk load staging directory. This directory has special hidden
   * permissions. Create it if necessary.
   * @throws IOException if the directory cannot be created or its permissions set
   */
  private void checkStagingDir() throws IOException {
    Path p = new Path(this.rootdir, HConstants.BULKLOAD_STAGING_DIR_NAME);
    try {
      if (!this.fs.exists(p)) {
        if (!this.fs.mkdirs(p, hiddenDirPerms)) {
          throw new IOException("Failed to create staging directory " + p.toString());
        }
      } else {
        this.fs.setPermission(p, hiddenDirPerms);
      }
    } catch (IOException e) {
      LOG.error("Failed to create or set permission on staging directory " + p.toString());
      throw new IOException("Failed to create or set permission on staging directory "
          + p.toString(), e);
    }
  }

  private static void bootstrap(final Path rd, final Configuration c) throws IOException {
    LOG.info("BOOTSTRAP: creating hbase:meta region");
    try {
      // Bootstrapping, make sure blockcache is off.  Else, one will be
      // created here in bootstrap and it'll need to be cleaned up.  Better to
      // not make it in the first place.  Turn off block caching for bootstrap.
      // Enable after.
      TableDescriptor metaDescriptor = new FSTableDescriptors(c).get(TableName.META_TABLE_NAME);
      HRegion meta = HRegion.createHRegion(RegionInfoBuilder.FIRST_META_REGIONINFO, rd,
          c, setInfoFamilyCachingForMeta(metaDescriptor, false), null);
      meta.close();
    } catch (IOException e) {
      e = e instanceof RemoteException ? ((RemoteException) e).unwrapRemoteException() : e;
      LOG.error("bootstrap", e);
      throw e;
    }
  }

  /**
   * Enable or disable block caching and in-memory caching for the {@code info} family
   * of hbase:meta, as specified by {@code b}.
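   * <p>
   * For example, bootstrap disables caching while creating the meta region, and the
   * caching could be re-enabled afterwards (a sketch, not code from this class):
   * <pre>
   * TableDescriptor noCache = setInfoFamilyCachingForMeta(metaDescriptor, false);
   * // ... create the region with noCache ...
   * TableDescriptor cached = setInfoFamilyCachingForMeta(metaDescriptor, true);
   * </pre>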
   */
  public static TableDescriptor setInfoFamilyCachingForMeta(TableDescriptor metaDescriptor,
      final boolean b) {
    TableDescriptorBuilder builder = TableDescriptorBuilder.newBuilder(metaDescriptor);
    for (ColumnFamilyDescriptor hcd: metaDescriptor.getColumnFamilies()) {
      if (Bytes.equals(hcd.getName(), HConstants.CATALOG_FAMILY)) {
        builder.modifyColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(hcd)
                .setBlockCacheEnabled(b)
                .setInMemory(b)
                .build());
      }
    }
    return builder.build();
  }

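  /**
   * Archive the store files of the given column family under the default root dir,
   * then delete the family directory.
   * @param region region the family belongs to
   * @param familyName name of the column family
   * @throws IOException if archiving or deletion fails
   */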
  public void deleteFamilyFromFS(RegionInfo region, byte[] familyName)
      throws IOException {
    deleteFamilyFromFS(rootdir, region, familyName);
  }

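  /**
   * Archive the store files of the given column family under the given root dir,
   * then delete the family directory.
   * @param rootDir root dir the region's table lives under
   * @param region region the family belongs to
   * @param familyName name of the column family
   * @throws IOException if archiving or deletion fails
   */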
  public void deleteFamilyFromFS(Path rootDir, RegionInfo region, byte[] familyName)
      throws IOException {
    // archive family store files
    Path tableDir = FSUtils.getTableDir(rootDir, region.getTable());
    HFileArchiver.archiveFamily(fs, conf, region, tableDir, familyName);

    // delete the family folder
    Path familyDir = new Path(tableDir,
      new Path(region.getEncodedName(), Bytes.toString(familyName)));
    if (!fs.delete(familyDir, true)) {
      if (fs.exists(familyDir)) {
        throw new IOException("Could not delete family "
            + Bytes.toString(familyName) + " from FileSystem for region "
            + region.getRegionNameAsString() + "(" + region.getEncodedName()
            + ")");
      }
    }
  }

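  /** Currently a no-op. */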
  public void stop() {
  }

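  /**
   * Log the current state of the filesystem under the hbase root dir to the given logger.
   * @param log logger to receive the listing
   * @throws IOException if the filesystem cannot be listed
   */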
  public void logFileSystemState(Logger log) throws IOException {
    FSUtils.logFileSystemState(fs, rootdir, log);
  }
}