/**
 *
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.master;

import java.io.IOException;
import java.io.InterruptedIOException;
import java.util.ArrayList;
import java.util.HashSet;
import java.util.List;
import java.util.NavigableMap;
import java.util.Set;
import java.util.concurrent.locks.Lock;
import java.util.concurrent.locks.ReentrantLock;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.PathFilter;
import org.apache.hadoop.hbase.ClusterId;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.InvalidFamilyOperationException;
import org.apache.hadoop.hbase.RemoteExceptionHandler;
import org.apache.hadoop.hbase.Server;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.backup.HFileArchiver;
import org.apache.hadoop.hbase.catalog.MetaReader;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.exceptions.DeserializationException;
import org.apache.hadoop.hbase.fs.HFileSystem;
import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitLogTask.RecoveryMode;
import org.apache.hadoop.hbase.regionserver.HRegion;
import org.apache.hadoop.hbase.regionserver.wal.HLog;
import org.apache.hadoop.hbase.regionserver.wal.HLogUtil;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
import org.apache.hadoop.hbase.util.FSTableDescriptors;
import org.apache.hadoop.hbase.util.FSUtils;
import org.apache.zookeeper.KeeperException;

/**
 * This class abstracts the operations the HMaster performs against the
 * underlying file system, such as splitting log files and checking file
 * system status.
 */
@InterfaceAudience.Private
public class MasterFileSystem {
  private static final Log LOG = LogFactory.getLog(MasterFileSystem.class.getName());
  // HBase configuration
  Configuration conf;
  // master status
  Server master;
  // metrics for master
  private final MetricsMasterFileSystem metricsMasterFilesystem = new MetricsMasterFileSystem();
  // Persisted unique cluster ID
  private ClusterId clusterId;
  // Keep around for convenience.
  private final FileSystem fs;
  // Is the filesystem ok?
  private volatile boolean fsOk = true;
  // The Path to the old logs dir
  private final Path oldLogDir;
  // root hbase directory on the FS
  private final Path rootdir;
  // hbase temp directory used for table construction and deletion
  private final Path tempdir;
  // lock taken around log splitting
  final Lock splitLogLock = new ReentrantLock();
  final boolean distributedLogReplay;
  final SplitLogManager splitLogManager;
  private final MasterServices services;

  final static PathFilter META_FILTER = new PathFilter() {
    @Override
    public boolean accept(Path p) {
      return HLogUtil.isMetaFile(p);
    }
  };

  final static PathFilter NON_META_FILTER = new PathFilter() {
    @Override
    public boolean accept(Path p) {
      return !HLogUtil.isMetaFile(p);
    }
  };

  public MasterFileSystem(Server master, MasterServices services)
  throws IOException {
    this.conf = master.getConfiguration();
    this.master = master;
    this.services = services;
    // Set filesystem to be that of this.rootdir else we get complaints about
    // mismatched filesystems if hbase.rootdir is hdfs and fs.defaultFS is
    // default localfs.  Presumption is that rootdir is fully-qualified before
    // we get to here with appropriate fs scheme.
    this.rootdir = FSUtils.getRootDir(conf);
    this.tempdir = new Path(this.rootdir, HConstants.HBASE_TEMP_DIRECTORY);
    // Cover both ways of setting the default fs: the deprecated key and the
    // current one.
    this.fs = this.rootdir.getFileSystem(conf);
    FSUtils.setFsDefault(conf, new Path(this.fs.getUri()));
    // make sure the fs has the same conf
    fs.setConf(conf);
    // set up the archived logs path
    this.oldLogDir = createInitialFileSystemLayout();
    HFileSystem.addLocationsOrderInterceptor(conf);
    try {
      this.splitLogManager = new SplitLogManager(master.getZooKeeper(),
        master.getConfiguration(), master, services,
        master.getServerName());
    } catch (KeeperException e) {
      throw new IOException(e);
    }
    this.distributedLogReplay = (this.splitLogManager.getRecoveryMode() == RecoveryMode.LOG_REPLAY);
  }
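
  /*
   * Illustrative sketch (not part of the original class): the active master
   * implements both Server and MasterServices, so a typical wiring during
   * master startup would look roughly like the following. The field name is
   * an assumption for illustration, not an HBase guarantee.
   *
   *   // inside HMaster initialization, after ZooKeeper is up:
   *   this.fileSystemManager = new MasterFileSystem(this, this);
   *   Path rootDir = this.fileSystemManager.getRootDir();
   */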

  /**
   * Create initial layout in filesystem.
   * <ol>
   * <li>Check if the meta region exists and is readable, if not create it.
   * Create hbase.version and the hbase:meta directory if they are missing.
   * </li>
   * <li>Create a log archive directory for RS to put archived logs</li>
   * </ol>
   * Idempotent.
   */
  private Path createInitialFileSystemLayout() throws IOException {
    // check if the root directory exists
    checkRootDir(this.rootdir, conf, this.fs);

    // check if temp directory exists and clean it
    checkTempDir(this.tempdir, conf, this.fs);

    Path oldLogDir = new Path(this.rootdir, HConstants.HREGION_OLDLOGDIR_NAME);

    // Make sure the region servers can archive their old logs
    if (!this.fs.exists(oldLogDir)) {
      this.fs.mkdirs(oldLogDir);
    }

    return oldLogDir;
  }

  public FileSystem getFileSystem() {
    return this.fs;
  }

  /**
   * Get the directory where old logs go
   * @return the dir
   */
  public Path getOldLogDir() {
    return this.oldLogDir;
  }

  /**
   * Checks to see if the file system is still accessible.
   * If not, aborts the master and marks the file system as unavailable.
   * @return false if file system is not available
   */
  public boolean checkFileSystem() {
    if (this.fsOk) {
      try {
        FSUtils.checkFileSystemAvailable(this.fs);
        FSUtils.checkDfsSafeMode(this.conf);
      } catch (IOException e) {
        master.abort("Shutting down HBase cluster: file system not available", e);
        this.fsOk = false;
      }
    }
    return this.fsOk;
  }
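
  /*
   * Illustrative sketch (not part of the original class): callers typically
   * treat a false return as fatal, since the abort above has already been
   * requested. A periodic health check might look roughly like this; the
   * surrounding chore and field names are assumptions for illustration.
   *
   *   if (!masterFileSystem.checkFileSystem()) {
   *     LOG.warn("Filesystem gone; master is aborting, stop scheduling work");
   *     return;
   *   }
   */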

  /**
   * @return HBase root dir.
   */
  public Path getRootDir() {
    return this.rootdir;
  }

  /**
   * @return HBase temp dir.
   */
  public Path getTempDir() {
    return this.tempdir;
  }

  /**
   * @return The unique identifier generated for this cluster
   */
  public ClusterId getClusterId() {
    return clusterId;
  }

  /**
   * Inspect the log directory to find dead servers which need recovery work
   * @return A set of ServerNames which aren't running but still have WAL files left in file system
   */
  Set<ServerName> getFailedServersFromLogFolders() {
    boolean retrySplitting = !conf.getBoolean("hbase.hlog.split.skip.errors",
      HLog.SPLIT_SKIP_ERRORS_DEFAULT);

    Set<ServerName> serverNames = new HashSet<ServerName>();
    Path logsDirPath = new Path(this.rootdir, HConstants.HREGION_LOGDIR_NAME);

    do {
      if (master.isStopped()) {
        LOG.warn("Master stopped while trying to get failed servers.");
        break;
      }
      try {
        if (!this.fs.exists(logsDirPath)) return serverNames;
        FileStatus[] logFolders = FSUtils.listStatus(this.fs, logsDirPath, null);
        // Get online servers after getting log folders to avoid log folder deletion of newly
        // checked in region servers. See HBASE-5916.
        Set<ServerName> onlineServers = ((HMaster) master).getServerManager().getOnlineServers()
            .keySet();

        if (logFolders == null || logFolders.length == 0) {
          LOG.debug("No log files to split, proceeding...");
          return serverNames;
        }
        for (FileStatus status : logFolders) {
          String sn = status.getPath().getName();
          // truncate splitting suffix if present (for ServerName parsing)
          if (sn.endsWith(HLog.SPLITTING_EXT)) {
            sn = sn.substring(0, sn.length() - HLog.SPLITTING_EXT.length());
          }
          ServerName serverName = ServerName.parseServerName(sn);
          if (!onlineServers.contains(serverName)) {
            LOG.info("Log folder " + status.getPath() + " doesn't belong "
                + "to a known region server, splitting");
            serverNames.add(serverName);
          } else {
            LOG.info("Log folder " + status.getPath() + " belongs to an existing region server");
          }
        }
        retrySplitting = false;
      } catch (IOException ioe) {
        LOG.warn("Failed getting failed servers to be recovered.", ioe);
        if (!checkFileSystem()) {
          LOG.warn("Bad Filesystem, exiting");
          Runtime.getRuntime().halt(1);
        }
        try {
          if (retrySplitting) {
            Thread.sleep(conf.getInt("hbase.hlog.split.failure.retry.interval", 30 * 1000));
          }
        } catch (InterruptedException e) {
          LOG.warn("Interrupted, aborting since cannot return w/o splitting");
          Thread.currentThread().interrupt();
          retrySplitting = false;
          Runtime.getRuntime().halt(1);
        }
      }
    } while (retrySplitting);

    return serverNames;
  }
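
  /*
   * Illustrative sketch (not part of the original class): during master
   * startup the failed servers found above are handed to the log splitting
   * machinery before regions are reassigned. A rough outline, with ordering
   * and names hedged:
   *
   *   Set<ServerName> failed = masterFileSystem.getFailedServersFromLogFolders();
   *   masterFileSystem.splitMetaLog(failed);   // recover hbase:meta WALs first
   *   masterFileSystem.splitLog(failed);       // then user-region WALs
   */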

  public void splitLog(final ServerName serverName) throws IOException {
    Set<ServerName> serverNames = new HashSet<ServerName>();
    serverNames.add(serverName);
    splitLog(serverNames);
  }

  /**
   * Specialized method to handle the splitting for meta HLog
   * @param serverName server whose meta HLog is to be split
   * @throws IOException if the log splitting fails
   */
  public void splitMetaLog(final ServerName serverName) throws IOException {
    Set<ServerName> serverNames = new HashSet<ServerName>();
    serverNames.add(serverName);
    splitMetaLog(serverNames);
  }

  /**
   * Specialized method to handle the splitting for meta HLog
   * @param serverNames servers whose meta HLogs are to be split
   * @throws IOException if the log splitting fails
   */
  public void splitMetaLog(final Set<ServerName> serverNames) throws IOException {
    splitLog(serverNames, META_FILTER);
  }

  private List<Path> getLogDirs(final Set<ServerName> serverNames) throws IOException {
    List<Path> logDirs = new ArrayList<Path>();
    boolean needReleaseLock = false;
    if (!this.services.isInitialized()) {
      // during master initialization, we could have multiple places splitting the same wal
      this.splitLogLock.lock();
      needReleaseLock = true;
    }
    try {
      for (ServerName serverName : serverNames) {
        Path logDir = new Path(this.rootdir, HLogUtil.getHLogDirectoryName(serverName.toString()));
        Path splitDir = logDir.suffix(HLog.SPLITTING_EXT);
        // Rename the directory so a rogue RS doesn't create more HLogs
        if (fs.exists(logDir)) {
          if (!this.fs.rename(logDir, splitDir)) {
            throw new IOException("Failed fs.rename for log split: " + logDir);
          }
          logDir = splitDir;
          LOG.debug("Renamed log directory: " + splitDir);
        } else if (!fs.exists(splitDir)) {
          LOG.info("Log dir for server " + serverName + " does not exist");
          continue;
        }
        logDirs.add(splitDir);
      }
    } finally {
      if (needReleaseLock) {
        this.splitLogLock.unlock();
      }
    }
    return logDirs;
  }

  /**
   * Mark regions in recovering state when distributedLogReplay is enabled
   * @param serverNames Set of ServerNames whose wals are to be replayed in order to recover
   *          changes contained in them
   * @throws IOException
   */
  public void prepareLogReplay(Set<ServerName> serverNames) throws IOException {
    if (!this.distributedLogReplay) {
      return;
    }
    // mark regions in recovering state
    for (ServerName serverName : serverNames) {
      NavigableMap<HRegionInfo, Result> regions = this.getServerUserRegions(serverName);
      if (regions == null) {
        continue;
      }
      try {
        this.splitLogManager.markRegionsRecoveringInZK(serverName, regions.keySet());
      } catch (KeeperException e) {
        throw new IOException(e);
      }
    }
  }

  /**
   * Mark regions in recovering state when distributedLogReplay is enabled
   * @param serverName Failed region server whose wals are to be replayed
   * @param regions Set of regions to be recovered
   * @throws IOException
   */
  public void prepareLogReplay(ServerName serverName, Set<HRegionInfo> regions) throws IOException {
    if (!this.distributedLogReplay) {
      return;
    }
    // mark regions in recovering state
    if (regions == null || regions.isEmpty()) {
      return;
    }
    try {
      this.splitLogManager.markRegionsRecoveringInZK(serverName, regions);
    } catch (KeeperException e) {
      throw new IOException(e);
    }
  }
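
  /*
   * Illustrative sketch (not part of the original class): with distributed
   * log replay enabled, server-shutdown handling first marks the dead
   * server's regions as recovering and only then kicks off log splitting, so
   * edits can be replayed into the re-opened regions. Roughly, with names
   * hedged:
   *
   *   masterFileSystem.prepareLogReplay(deadServer, regionsOnDeadServer);
   *   // ... reassign the regions ...
   *   masterFileSystem.splitLog(deadServer);
   */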

  public void splitLog(final Set<ServerName> serverNames) throws IOException {
    splitLog(serverNames, NON_META_FILTER);
  }

  /**
   * Wrapper function on {@link SplitLogManager#removeStaleRecoveringRegionsFromZK(Set)}
   * @param failedServers servers whose stale recovering regions should be removed
   * @throws KeeperException
   * @throws InterruptedIOException
   */
  void removeStaleRecoveringRegionsFromZK(final Set<ServerName> failedServers)
      throws KeeperException, InterruptedIOException {
    this.splitLogManager.removeStaleRecoveringRegionsFromZK(failedServers);
  }

  /**
   * This method is the base split method that splits HLog files matching a filter. Callers should
   * pass the appropriate filter for meta and non-meta HLogs.
   * @param serverNames servers whose HLogs are to be split
   * @param filter path filter selecting which HLogs to split
   * @throws IOException
   */
  public void splitLog(final Set<ServerName> serverNames, PathFilter filter) throws IOException {
    long splitTime = 0, splitLogSize = 0;
    List<Path> logDirs = getLogDirs(serverNames);

    splitLogManager.handleDeadWorkers(serverNames);
    splitTime = EnvironmentEdgeManager.currentTimeMillis();
    splitLogSize = splitLogManager.splitLogDistributed(serverNames, logDirs, filter);
    splitTime = EnvironmentEdgeManager.currentTimeMillis() - splitTime;

    if (this.metricsMasterFilesystem != null) {
      if (filter == META_FILTER) {
        this.metricsMasterFilesystem.addMetaWALSplit(splitTime, splitLogSize);
      } else {
        this.metricsMasterFilesystem.addSplit(splitTime, splitLogSize);
      }
    }
  }

  /**
   * Get the rootdir.  Make sure it's wholesome and exists before returning.
   * @param rd the configured hbase root directory
   * @param c configuration to use
   * @param fs filesystem the root directory lives on
   * @return hbase.rootdir (after checks for existence and bootstrapping if
   * needed populating the directory with necessary bootup files).
   * @throws IOException
   */
  @SuppressWarnings("deprecation")
  private Path checkRootDir(final Path rd, final Configuration c,
    final FileSystem fs)
  throws IOException {
    // If FS is in safe mode wait till out of it.
    FSUtils.waitOnSafeMode(c, c.getInt(HConstants.THREAD_WAKE_FREQUENCY, 10 * 1000));
    // Filesystem is good. Go ahead and check for hbase.rootdir.
    try {
      if (!fs.exists(rd)) {
        fs.mkdirs(rd);
        // DFS leaves safe mode with 0 DNs when there are 0 blocks.
        // We used to handle this by checking the current DN count and waiting until
        // it is nonzero. With security, the check for datanode count doesn't work --
        // it is a privileged op. So instead we adopt the strategy of the jobtracker
        // and simply retry file creation during bootstrap indefinitely. As soon as
        // there is one datanode it will succeed. Permission problems should have
        // already been caught by mkdirs above.
        FSUtils.setVersion(fs, rd, c.getInt(HConstants.THREAD_WAKE_FREQUENCY,
          10 * 1000), c.getInt(HConstants.VERSION_FILE_WRITE_ATTEMPTS,
            HConstants.DEFAULT_VERSION_FILE_WRITE_ATTEMPTS));
      } else {
        if (!fs.isDirectory(rd)) {
          throw new IllegalArgumentException(rd.toString() + " is not a directory");
        }
        // as above
        FSUtils.checkVersion(fs, rd, true, c.getInt(HConstants.THREAD_WAKE_FREQUENCY,
          10 * 1000), c.getInt(HConstants.VERSION_FILE_WRITE_ATTEMPTS,
            HConstants.DEFAULT_VERSION_FILE_WRITE_ATTEMPTS));
      }
    } catch (DeserializationException de) {
      LOG.fatal("Please fix invalid configuration for " + HConstants.HBASE_DIR, de);
      IOException ioe = new IOException();
      ioe.initCause(de);
      throw ioe;
    } catch (IllegalArgumentException iae) {
      LOG.fatal("Please fix invalid configuration for "
        + HConstants.HBASE_DIR + " " + rd.toString(), iae);
      throw iae;
    }
    // Make sure cluster ID exists
    if (!FSUtils.checkClusterIdExists(fs, rd, c.getInt(
        HConstants.THREAD_WAKE_FREQUENCY, 10 * 1000))) {
      FSUtils.setClusterId(fs, rd, new ClusterId(),
        c.getInt(HConstants.THREAD_WAKE_FREQUENCY, 10 * 1000));
    }
    clusterId = FSUtils.getClusterId(fs, rd);

    // Make sure the meta region directory exists!
    if (!FSUtils.metaRegionExists(fs, rd)) {
      bootstrap(rd, c);
    } else {
      // Migrate table descriptor files if necessary
      org.apache.hadoop.hbase.util.FSTableDescriptorMigrationToSubdir
        .migrateFSTableDescriptorsIfNecessary(fs, rd);
    }

    // Create tableinfo-s for hbase:meta if not already there.
    new FSTableDescriptors(fs, rd).createTableDescriptor(HTableDescriptor.META_TABLEDESC);

    return rd;
  }

  /**
   * Make sure the hbase temp directory exists and is empty.
   * NOTE that this method is only executed once just after the master becomes the active one.
   */
  private void checkTempDir(final Path tmpdir, final Configuration c, final FileSystem fs)
      throws IOException {
    // If the temp directory exists, clear the content (left over from the previous run)
    if (fs.exists(tmpdir)) {
      // Archive any tables in temp, maybe left over from a failed deletion;
      // if not, the cleaner will take care of them.
      for (Path tabledir: FSUtils.getTableDirs(fs, tmpdir)) {
        for (Path regiondir: FSUtils.getRegionDirs(fs, tabledir)) {
          HFileArchiver.archiveRegion(fs, this.rootdir, tabledir, regiondir);
        }
      }
      if (!fs.delete(tmpdir, true)) {
        throw new IOException("Unable to clean the temp directory: " + tmpdir);
      }
    }

    // Create the temp directory
    if (!fs.mkdirs(tmpdir)) {
      throw new IOException("HBase temp directory '" + tmpdir + "' creation failure.");
    }
  }

  private static void bootstrap(final Path rd, final Configuration c)
  throws IOException {
    LOG.info("BOOTSTRAP: creating hbase:meta region");
    try {
      // Bootstrapping, make sure blockcache is off.  Else, one will be
      // created here in bootstrap and it'll need to be cleaned up.  Better to
      // not make it in first place.  Turn off block caching for bootstrap.
      // Enable after.
      HRegionInfo metaHRI = new HRegionInfo(HRegionInfo.FIRST_META_REGIONINFO);
      setInfoFamilyCachingForMeta(false);
      HRegion meta = HRegion.createHRegion(metaHRI, rd, c,
          HTableDescriptor.META_TABLEDESC);
      setInfoFamilyCachingForMeta(true);
      HRegion.closeHRegion(meta);
    } catch (IOException e) {
      e = RemoteExceptionHandler.checkIOException(e);
      LOG.error("bootstrap", e);
      throw e;
    }
  }

  /**
   * Enable or disable in-memory and block caching for the hbase:meta info family.
   * @param b true to enable caching, false to disable it
   */
  public static void setInfoFamilyCachingForMeta(final boolean b) {
    for (HColumnDescriptor hcd:
        HTableDescriptor.META_TABLEDESC.getColumnFamilies()) {
      if (Bytes.equals(hcd.getName(), HConstants.CATALOG_FAMILY)) {
        hcd.setBlockCacheEnabled(b);
        hcd.setInMemory(b);
      }
    }
  }

  public void deleteRegion(HRegionInfo region) throws IOException {
    HFileArchiver.archiveRegion(conf, fs, region);
  }

  public void deleteTable(TableName tableName) throws IOException {
    fs.delete(FSUtils.getTableDir(rootdir, tableName), true);
  }

  /**
   * Move the specified table to the hbase temp directory
   * @param tableName Table name to move
   * @return The temp location of the table moved
   * @throws IOException in case of file-system failure
   */
  public Path moveTableToTemp(TableName tableName) throws IOException {
    Path srcPath = FSUtils.getTableDir(rootdir, tableName);
    Path tempPath = FSUtils.getTableDir(this.tempdir, tableName);

    // Ensure temp exists
    if (!fs.exists(tempPath.getParent()) && !fs.mkdirs(tempPath.getParent())) {
      throw new IOException("HBase temp directory '" + tempPath.getParent() + "' creation failure.");
    }

    if (!fs.rename(srcPath, tempPath)) {
      throw new IOException("Unable to move '" + srcPath + "' to temp '" + tempPath + "'");
    }

    return tempPath;
  }
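
  /*
   * Illustrative sketch (not part of the original class): table deletion
   * typically moves the table directory out of the way first, so clients see
   * it disappear in one step, and then archives and removes the files from
   * temp. Roughly, with names hedged:
   *
   *   Path tempTableDir = masterFileSystem.moveTableToTemp(tableName);
   *   // ... archive regions under tempTableDir, then remove the directory ...
   */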

  public void updateRegionInfo(HRegionInfo region) {
    // TODO: implement this. It appears to be broken in trunk; nothing
    //       currently updates it.
    //       @see HRegion.checkRegioninfoOnFilesystem()
  }

  public void deleteFamilyFromFS(HRegionInfo region, byte[] familyName)
      throws IOException {
    // archive family store files
    Path tableDir = FSUtils.getTableDir(rootdir, region.getTable());
    HFileArchiver.archiveFamily(fs, conf, region, tableDir, familyName);

    // delete the family folder
    Path familyDir = new Path(tableDir,
      new Path(region.getEncodedName(), Bytes.toString(familyName)));
    if (!fs.delete(familyDir, true)) {
      throw new IOException("Could not delete family "
          + Bytes.toString(familyName) + " from FileSystem for region "
          + region.getRegionNameAsString() + "(" + region.getEncodedName()
          + ")");
    }
  }

  public void stop() {
    if (splitLogManager != null) {
      this.splitLogManager.stop();
    }
  }

  /**
   * Delete column of a table
   * @param tableName table from which the column family is to be removed
   * @param familyName name of the column family to remove
   * @return Modified HTableDescriptor with requested column deleted.
   * @throws IOException
   */
  public HTableDescriptor deleteColumn(TableName tableName, byte[] familyName)
      throws IOException {
    LOG.info("DeleteColumn. Table = " + tableName
        + " family = " + Bytes.toString(familyName));
    HTableDescriptor htd = this.services.getTableDescriptors().get(tableName);
    htd.removeFamily(familyName);
    this.services.getTableDescriptors().add(htd);
    return htd;
  }

  /**
   * Modify Column of a table
   * @param tableName table whose column family is to be modified
   * @param hcd HColumnDescriptor describing the modified column family
   * @return Modified HTableDescriptor with the column modified.
   * @throws IOException
   */
  public HTableDescriptor modifyColumn(TableName tableName, HColumnDescriptor hcd)
      throws IOException {
    LOG.info("AddModifyColumn. Table = " + tableName
        + " HCD = " + hcd.toString());

    HTableDescriptor htd = this.services.getTableDescriptors().get(tableName);
    byte [] familyName = hcd.getName();
    if (!htd.hasFamily(familyName)) {
      throw new InvalidFamilyOperationException("Family '" +
        Bytes.toString(familyName) + "' doesn't exist so cannot be modified");
    }
    htd.addFamily(hcd);
    this.services.getTableDescriptors().add(htd);
    return htd;
  }

  /**
   * Add column to a table
   * @param tableName table to which the column family is to be added
   * @param hcd HColumnDescriptor describing the new column family
   * @return Modified HTableDescriptor with new column added.
   * @throws IOException
   */
  public HTableDescriptor addColumn(TableName tableName, HColumnDescriptor hcd)
      throws IOException {
    LOG.info("AddColumn. Table = " + tableName + " HCD = " +
      hcd.toString());
    HTableDescriptor htd = this.services.getTableDescriptors().get(tableName);
    if (htd == null) {
      throw new InvalidFamilyOperationException("Family '" +
        hcd.getNameAsString() + "' cannot be added as HTD is null");
    }
    htd.addFamily(hcd);
    this.services.getTableDescriptors().add(htd);
    return htd;
  }
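
  /*
   * Illustrative sketch (not part of the original class): these schema
   * helpers only rewrite the table descriptor on disk; callers are expected
   * to reopen the table's regions so region servers pick up the change. A
   * hypothetical alter flow:
   *
   *   HColumnDescriptor newFamily = new HColumnDescriptor("f2");
   *   masterFileSystem.addColumn(tableName, newFamily);
   *   // ... then reopen the table's regions to apply the new schema ...
   */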

  private NavigableMap<HRegionInfo, Result> getServerUserRegions(ServerName serverName)
      throws IOException {
    if (!this.master.isStopped()) {
      try {
        this.master.getCatalogTracker().waitForMeta();
        return MetaReader.getServerUserRegions(this.master.getCatalogTracker(), serverName);
      } catch (InterruptedException e) {
        throw (InterruptedIOException)new InterruptedIOException().initCause(e);
      }
    }
    return null;
  }

  /**
   * Used in SSH (ServerShutdownHandler) to set the recovery mode from configuration after all
   * outstanding log split tasks are drained.
   * @throws IOException
   */
  public void setLogRecoveryMode() throws IOException {
    try {
      this.splitLogManager.setRecoveryMode(false);
    } catch (KeeperException e) {
      throw new IOException(e);
    }
  }

  public RecoveryMode getLogRecoveryMode() {
    return this.splitLogManager.getRecoveryMode();
  }
}
708 }