
/**
 *
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.master;

import java.io.IOException;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.hbase.ClusterId;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.backup.HFileArchiver;
import org.apache.hadoop.hbase.classification.InterfaceAudience;
import org.apache.hadoop.hbase.exceptions.DeserializationException;
import org.apache.hadoop.hbase.fs.HFileSystem;
import org.apache.hadoop.hbase.mob.MobConstants;
import org.apache.hadoop.hbase.mob.MobUtils;
import org.apache.hadoop.hbase.regionserver.HRegion;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.FSTableDescriptors;
import org.apache.hadoop.hbase.util.FSUtils;
import org.apache.hadoop.ipc.RemoteException;

/**
 * This class abstracts a bunch of operations the HMaster needs to interact with
 * the underlying file system like creating the initial layout, checking file
 * system status, etc.
 */
@InterfaceAudience.Private
public class MasterFileSystem {
  private static final Log LOG = LogFactory.getLog(MasterFileSystem.class);

  // HBase configuration
  private final Configuration conf;
  // Persisted unique cluster ID
  private ClusterId clusterId;
  // Keep around for convenience.
  private final FileSystem fs;
  // root hbase directory on the FS
  private final Path rootdir;
  // hbase temp directory used for table construction and deletion
  private final Path tempdir;

  private final MasterServices services;

  public MasterFileSystem(MasterServices services) throws IOException {
    this.conf = services.getConfiguration();
    this.services = services;
    // Set the filesystem to that of this.rootdir, else we get complaints about
    // mismatched filesystems if hbase.rootdir is on hdfs and fs.defaultFS is
    // the default localfs. The presumption is that rootdir is fully-qualified
    // with the appropriate fs scheme before we get here.
    this.rootdir = FSUtils.getRootDir(conf);
    this.tempdir = new Path(this.rootdir, HConstants.HBASE_TEMP_DIRECTORY);
    // Cover both bases, the old way of setting the default fs and the new.
    // We're supposed to run on 0.20 and 0.21 anyway.
    this.fs = this.rootdir.getFileSystem(conf);
    FSUtils.setFsDefault(conf, new Path(this.fs.getUri()));
    // make sure the fs has the same conf
    fs.setConf(conf);
    // create the initial filesystem layout (root dir, temp dir, version file, meta)
    createInitialFileSystemLayout();
    HFileSystem.addLocationsOrderInterceptor(conf);
  }
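
  /*
   * Illustrative usage (a sketch, not part of the original class): the active
   * master constructs this once and then queries it for paths, assuming a
   * MasterServices instance named services is available:
   *
   *   MasterFileSystem mfs = new MasterFileSystem(services);
   *   FileSystem fs = mfs.getFileSystem(); // filesystem backing hbase.rootdir
   *   Path root = mfs.getRootDir();        // fully-qualified hbase.rootdir
   *   Path tmp = mfs.getTempDir();         // temp dir under the rootdir, wiped on startup
   */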

  /**
   * Create initial layout in filesystem.
   * <ol>
   * <li>Check if the meta region exists and is readable; if not, create it.
   * Create hbase.version and the hbase:meta directory if they do not already
   * exist.
   * </li>
   * </ol>
   * Idempotent.
   */
  private void createInitialFileSystemLayout() throws IOException {
    // check if the root directory exists
    checkRootDir(this.rootdir, conf, this.fs);

    // check if temp directory exists and clean it
    checkTempDir(this.tempdir, conf, this.fs);
  }
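
  /*
   * Idempotence sketch (illustrative): every step above first checks for
   * existence (root dir, version file, cluster id, meta dir, temp dir), so
   * running the layout creation twice in a row is harmless:
   *
   *   createInitialFileSystemLayout();
   *   createInitialFileSystemLayout();  // effectively a no-op the second time
   */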

  public FileSystem getFileSystem() {
    return this.fs;
  }

  public Configuration getConfiguration() {
    return this.conf;
  }

  /**
   * @return HBase root dir.
   */
  public Path getRootDir() {
    return this.rootdir;
  }

  /**
   * @return HBase temp dir.
   */
  public Path getTempDir() {
    return this.tempdir;
  }

  /**
   * @return The unique identifier generated for this cluster
   */
  public ClusterId getClusterId() {
    return clusterId;
  }

  /**
   * Get the rootdir. Make sure it is wholesome and exists before returning.
   * @param rd root directory path
   * @param c configuration to use
   * @param fs filesystem to check
   * @return hbase.rootdir (after checks for existence and bootstrapping if
   * needed, populating the directory with necessary bootup files).
   * @throws IOException if the root directory cannot be checked or bootstrapped
   */
  private Path checkRootDir(final Path rd, final Configuration c, final FileSystem fs)
      throws IOException {
    // If FS is in safe mode wait till out of it.
    FSUtils.waitOnSafeMode(c, c.getInt(HConstants.THREAD_WAKE_FREQUENCY, 10 * 1000));

    boolean isSecurityEnabled = "kerberos".equalsIgnoreCase(c.get("hbase.security.authentication"));
    FsPermission rootDirPerms = new FsPermission(c.get("hbase.rootdir.perms", "700"));

    // Filesystem is good. Go ahead and check for hbase.rootdir.
    try {
      if (!fs.exists(rd)) {
        if (isSecurityEnabled) {
          fs.mkdirs(rd, rootDirPerms);
        } else {
          fs.mkdirs(rd);
        }
        // DFS leaves safe mode with 0 DNs when there are 0 blocks.
        // We used to handle this by checking the current DN count and waiting until
        // it is nonzero. With security, the check for datanode count doesn't work --
        // it is a privileged op. So instead we adopt the strategy of the jobtracker
        // and simply retry file creation during bootstrap indefinitely. As soon as
        // there is one datanode it will succeed. Permission problems should have
        // already been caught by mkdirs above.
        FSUtils.setVersion(fs, rd, c.getInt(HConstants.THREAD_WAKE_FREQUENCY,
          10 * 1000), c.getInt(HConstants.VERSION_FILE_WRITE_ATTEMPTS,
            HConstants.DEFAULT_VERSION_FILE_WRITE_ATTEMPTS));
      } else {
        if (!fs.isDirectory(rd)) {
          throw new IllegalArgumentException(rd.toString() + " is not a directory");
        }
        if (isSecurityEnabled && !rootDirPerms.equals(fs.getFileStatus(rd).getPermission())) {
          // check whether the permissions match
          LOG.warn("Found rootdir permissions NOT matching expected \"hbase.rootdir.perms\" for "
              + "rootdir=" + rd.toString() + " permissions=" + fs.getFileStatus(rd).getPermission()
              + " and \"hbase.rootdir.perms\" configured as "
              + c.get("hbase.rootdir.perms", "700") + ". Automatically setting the permissions. You"
              + " can change the permissions by setting \"hbase.rootdir.perms\" in hbase-site.xml "
              + "and restarting the master");
          fs.setPermission(rd, rootDirPerms);
        }
        // as above
        FSUtils.checkVersion(fs, rd, true, c.getInt(HConstants.THREAD_WAKE_FREQUENCY,
          10 * 1000), c.getInt(HConstants.VERSION_FILE_WRITE_ATTEMPTS,
            HConstants.DEFAULT_VERSION_FILE_WRITE_ATTEMPTS));
      }
    } catch (DeserializationException de) {
      LOG.fatal("Please fix invalid configuration for " + HConstants.HBASE_DIR, de);
      IOException ioe = new IOException();
      ioe.initCause(de);
      throw ioe;
    } catch (IllegalArgumentException iae) {
      LOG.fatal("Please fix invalid configuration for "
        + HConstants.HBASE_DIR + " " + rd.toString(), iae);
      throw iae;
    }
    // Make sure the cluster ID exists
    if (!FSUtils.checkClusterIdExists(fs, rd, c.getInt(
        HConstants.THREAD_WAKE_FREQUENCY, 10 * 1000))) {
      FSUtils.setClusterId(fs, rd, new ClusterId(),
        c.getInt(HConstants.THREAD_WAKE_FREQUENCY, 10 * 1000));
    }
    clusterId = FSUtils.getClusterId(fs, rd);

    // Make sure the meta region directory exists!
    if (!FSUtils.metaRegionExists(fs, rd)) {
      bootstrap(rd, c);
    }

    // Create tableinfo-s for hbase:meta if not already there.
    // The created table descriptor assumes the table is enabled. hbase:meta is
    // a system table, so its descriptor is predefined; we get it from the
    // FSTableDescriptors registry.
    FSTableDescriptors fsd = new FSTableDescriptors(c, fs, rd);
    fsd.createTableDescriptor(fsd.get(TableName.META_TABLE_NAME));

    return rd;
  }
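
  /*
   * A sketch of the "retry indefinitely" idea referenced in the comments
   * above (illustrative only; the real retry logic lives inside
   * FSUtils.setVersion and is bounded by HConstants.VERSION_FILE_WRITE_ATTEMPTS):
   *
   *   // conceptual loop (simplified; InterruptedException handling omitted):
   *   while (true) {
   *     try {
   *       FSUtils.setVersion(fs, rd, wakeFrequency, attempts);
   *       break;                        // succeeded once a datanode is up
   *     } catch (IOException e) {
   *       Thread.sleep(wakeFrequency);  // no datanodes yet; wait and retry
   *     }
   *   }
   */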

  /**
   * Make sure the hbase temp directory exists and is empty.
   * NOTE that this method is only executed once just after the master becomes the active one.
   */
  private void checkTempDir(final Path tmpdir, final Configuration c, final FileSystem fs)
      throws IOException {
    // If the temp directory exists, clear the content (left over from the previous run)
    if (fs.exists(tmpdir)) {
      // Archive tables in temp, maybe left over from a failed deletion;
      // if not, the cleaner will take care of them.
      for (Path tabledir: FSUtils.getTableDirs(fs, tmpdir)) {
        for (Path regiondir: FSUtils.getRegionDirs(fs, tabledir)) {
          HFileArchiver.archiveRegion(fs, this.rootdir, tabledir, regiondir);
        }
      }
      if (!fs.delete(tmpdir, true)) {
        throw new IOException("Unable to clean the temp directory: " + tmpdir);
      }
    }

    // Create the temp directory
    if (!fs.mkdirs(tmpdir)) {
      throw new IOException("Unable to create the HBase temp directory: " + tmpdir);
    }
  }
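
  /*
   * For orientation (an assumption based on the calls above, not verified
   * here): HFileArchiver.archiveRegion moves a region's store files out of
   * <tmpdir>/<table>/<region> and into the cluster archive directory under
   * the rootdir, where the cleaner chores can reclaim them later. Only after
   * all leftovers are archived is the temp directory deleted and recreated
   * empty.
   */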

  private static void bootstrap(final Path rd, final Configuration c)
      throws IOException {
    LOG.info("BOOTSTRAP: creating hbase:meta region");
    try {
      // Bootstrapping, make sure blockcache is off.  Else, one will be
      // created here in bootstrap and it'll need to be cleaned up.  Better to
      // not make it in the first place.  Turn off block caching for bootstrap.
      // Enable after.
      HRegionInfo metaHRI = new HRegionInfo(HRegionInfo.FIRST_META_REGIONINFO);
      HTableDescriptor metaDescriptor = new FSTableDescriptors(c).get(TableName.META_TABLE_NAME);
      setInfoFamilyCachingForMeta(metaDescriptor, false);
      HRegion meta = HRegion.createHRegion(metaHRI, rd, c, metaDescriptor, null);
      setInfoFamilyCachingForMeta(metaDescriptor, true);
      meta.close();
    } catch (IOException e) {
      e = e instanceof RemoteException ?
          ((RemoteException)e).unwrapRemoteException() : e;
      LOG.error("bootstrap", e);
      throw e;
    }
  }

  /**
   * Enable or disable block caching and in-memory caching for the info family
   * of hbase:meta, depending on the passed flag.
   */
  public static void setInfoFamilyCachingForMeta(HTableDescriptor metaDescriptor, final boolean b) {
    for (HColumnDescriptor hcd: metaDescriptor.getColumnFamilies()) {
      if (Bytes.equals(hcd.getName(), HConstants.CATALOG_FAMILY)) {
        hcd.setBlockCacheEnabled(b);
        hcd.setInMemory(b);
      }
    }
  }

  public void deleteFamilyFromFS(HRegionInfo region, byte[] familyName, boolean hasMob)
      throws IOException {
    // archive family store files
    Path tableDir = FSUtils.getTableDir(rootdir, region.getTable());
    HFileArchiver.archiveFamily(fs, conf, region, tableDir, familyName);

    // delete the family folder
    Path familyDir = new Path(tableDir,
      new Path(region.getEncodedName(), Bytes.toString(familyName)));
    if (!fs.delete(familyDir, true)) {
      if (fs.exists(familyDir)) {
        throw new IOException("Could not delete family "
            + Bytes.toString(familyName) + " from FileSystem for region "
            + region.getRegionNameAsString() + "(" + region.getEncodedName()
            + ")");
      }
    }

    // archive and delete mob files
    if (hasMob) {
      Path mobTableDir =
          FSUtils.getTableDir(new Path(getRootDir(), MobConstants.MOB_DIR_NAME), region.getTable());
      HRegionInfo mobRegionInfo = MobUtils.getMobRegionInfo(region.getTable());
      Path mobFamilyDir =
          new Path(mobTableDir,
              new Path(mobRegionInfo.getEncodedName(), Bytes.toString(familyName)));
      // archive mob family store files
      MobUtils.archiveMobStoreFiles(conf, fs, mobRegionInfo, mobFamilyDir, familyName);

      if (!fs.delete(mobFamilyDir, true)) {
        throw new IOException("Could not delete mob store files for family "
            + Bytes.toString(familyName) + " from FileSystem region "
            + mobRegionInfo.getRegionNameAsString() + "(" + mobRegionInfo.getEncodedName() + ")");
      }
    }
  }
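
  /*
   * Directory layout assumed by deleteFamilyFromFS (illustrative; the exact
   * paths come from FSUtils.getTableDir and MobUtils):
   *
   *   family dir:     <tableDir>/<encodedRegionName>/<familyName>
   *   mob family dir: <rootdir>/<MobConstants.MOB_DIR_NAME>/<table>/<mobRegion>/<familyName>
   *
   * In both cases the store files are archived first, and only then is the
   * (now empty) family directory deleted.
   */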

  public void stop() {
    // No-op; no filesystem resources are released here.
  }

  public void logFileSystemState(Log log) throws IOException {
    FSUtils.logFileSystemState(fs, rootdir, log);
  }
}