
1   /**
2    *
3    * Licensed to the Apache Software Foundation (ASF) under one
4    * or more contributor license agreements.  See the NOTICE file
5    * distributed with this work for additional information
6    * regarding copyright ownership.  The ASF licenses this file
7    * to you under the Apache License, Version 2.0 (the
8    * "License"); you may not use this file except in compliance
9    * with the License.  You may obtain a copy of the License at
10   *
11   *     http://www.apache.org/licenses/LICENSE-2.0
12   *
13   * Unless required by applicable law or agreed to in writing, software
14   * distributed under the License is distributed on an "AS IS" BASIS,
15   * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
16   * See the License for the specific language governing permissions and
17   * limitations under the License.
18   */
19  package org.apache.hadoop.hbase.master;
20  
21  import java.io.IOException;
22  import java.io.InterruptedIOException;
23  import java.util.ArrayList;
24  import java.util.HashSet;
25  import java.util.List;
26  import java.util.NavigableMap;
27  import java.util.Set;
28  import java.util.concurrent.locks.Lock;
29  import java.util.concurrent.locks.ReentrantLock;
30  
31  import org.apache.commons.logging.Log;
32  import org.apache.commons.logging.LogFactory;
33  import org.apache.hadoop.classification.InterfaceAudience;
34  import org.apache.hadoop.conf.Configuration;
35  import org.apache.hadoop.fs.FileStatus;
36  import org.apache.hadoop.fs.FileSystem;
37  import org.apache.hadoop.fs.Path;
38  import org.apache.hadoop.fs.PathFilter;
39  import org.apache.hadoop.hbase.ClusterId;
40  import org.apache.hadoop.hbase.TableName;
41  import org.apache.hadoop.hbase.HColumnDescriptor;
42  import org.apache.hadoop.hbase.HConstants;
43  import org.apache.hadoop.hbase.HRegionInfo;
44  import org.apache.hadoop.hbase.HTableDescriptor;
45  import org.apache.hadoop.hbase.InvalidFamilyOperationException;
46  import org.apache.hadoop.hbase.RemoteExceptionHandler;
47  import org.apache.hadoop.hbase.Server;
48  import org.apache.hadoop.hbase.ServerName;
49  import org.apache.hadoop.hbase.backup.HFileArchiver;
50  import org.apache.hadoop.hbase.catalog.MetaReader;
51  import org.apache.hadoop.hbase.client.Result;
52  import org.apache.hadoop.hbase.exceptions.DeserializationException;
53  import org.apache.hadoop.hbase.fs.HFileSystem;
54  import org.apache.hadoop.hbase.regionserver.HRegion;
55  import org.apache.hadoop.hbase.regionserver.wal.HLog;
56  import org.apache.hadoop.hbase.regionserver.wal.HLogSplitter;
57  import org.apache.hadoop.hbase.regionserver.wal.HLogUtil;
58  import org.apache.hadoop.hbase.util.Bytes;
59  import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
60  import org.apache.hadoop.hbase.util.FSTableDescriptors;
61  import org.apache.hadoop.hbase.util.FSUtils;
62  import org.apache.zookeeper.KeeperException;
63  
64  /**
65   * This class abstracts the operations the HMaster performs against the
66   * underlying file system, such as splitting log files, checking file
67   * system availability, and bootstrapping the cluster layout.
68   */
69  @InterfaceAudience.Private
70  public class MasterFileSystem {
71    private static final Log LOG = LogFactory.getLog(MasterFileSystem.class.getName());
72    // HBase configuration
73    Configuration conf;
74    // master status
75    Server master;
76    // metrics for master
77    private final MetricsMasterFileSystem metricsMasterFilesystem = new MetricsMasterFileSystem();
78    // Persisted unique cluster ID
79    private ClusterId clusterId;
80    // Keep around for convenience.
81    private final FileSystem fs;
82    // Is the filesystem ok?
83    private volatile boolean fsOk = true;
84    // The Path to the old logs dir
85    private final Path oldLogDir;
86    // root hbase directory on the FS
87    private final Path rootdir;
88    // hbase temp directory used for table construction and deletion
89    private final Path tempdir;
90    // create the split log lock
91    final Lock splitLogLock = new ReentrantLock();
92    final boolean distributedLogReplay;
93    final SplitLogManager splitLogManager;
94    private final MasterServices services;
95  
96    final static PathFilter META_FILTER = new PathFilter() {
97      public boolean accept(Path p) {
98        return HLogUtil.isMetaFile(p);
99      }
100   };
101 
102   final static PathFilter NON_META_FILTER = new PathFilter() {
103     public boolean accept(Path p) {
104       return !HLogUtil.isMetaFile(p);
105     }
106   };
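
      /*
       * Illustrative sketch (not part of the original class): the two filters above
       * partition a server's WAL files by name, assuming the ".meta" suffix convention
       * that HLogUtil.isMetaFile checks for. The file names below are made up:
       *
       *   Path metaWal = new Path("rs1%2C60020%2C1400000000000.1400000000123.meta");
       *   Path userWal = new Path("rs1%2C60020%2C1400000000000.1400000000456");
       *   META_FILTER.accept(metaWal);      // expected: true
       *   NON_META_FILTER.accept(userWal);  // expected: true
       */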
107 
108   public MasterFileSystem(Server master, MasterServices services, boolean masterRecovery)
109   throws IOException {
110     this.conf = master.getConfiguration();
111     this.master = master;
112     this.services = services;
113     // Set filesystem to be that of this.rootdir else we get complaints about
114     // mismatched filesystems if hbase.rootdir is hdfs and fs.defaultFS is
115     // default localfs.  The presumption is that rootdir is already fully qualified
116     // with the appropriate fs scheme by the time we get here.
117     this.rootdir = FSUtils.getRootDir(conf);
118     this.tempdir = new Path(this.rootdir, HConstants.HBASE_TEMP_DIRECTORY);
119     // Cover both bases, the old way of setting the default fs and the new;
120     // we are expected to run against both Hadoop 0.20 and 0.21 anyway.
121     this.fs = this.rootdir.getFileSystem(conf);
122     FSUtils.setFsDefault(conf, new Path(this.fs.getUri()));
123     // make sure the fs has the same conf
124     fs.setConf(conf);
125     this.distributedLogReplay = HLogSplitter.isDistributedLogReplay(this.conf);
126     // create the initial file system layout and
127     // set up the archived logs path
128     this.oldLogDir = createInitialFileSystemLayout();
129     HFileSystem.addLocationsOrderInterceptor(conf);
130     this.splitLogManager = new SplitLogManager(master.getZooKeeper(),
131       master.getConfiguration(), master, services,
132       master.getServerName(), masterRecovery);
133   }
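
      /*
       * Construction sketch (illustrative): the active master typically builds this
       * helper once during startup and keeps it for all file-system work. The
       * surrounding variable names are assumptions, not code copied from HMaster:
       *
       *   MasterFileSystem fileSystemManager = new MasterFileSystem(this, this, false);
       *   FileSystem fs = fileSystemManager.getFileSystem();
       *   Path rootDir = fileSystemManager.getRootDir();
       */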
134 
135   /**
136    * Create initial layout in filesystem.
137    * <ol>
138    * <li>Check if the meta region exists and is readable; if not, create it.
139    * Also create the hbase.version file and the hbase:meta directory if they
140    * are missing.</li>
141    * <li>Create a log archive directory where region servers can move their old logs</li>
142    * </ol>
143    * Idempotent.
144    */
145   private Path createInitialFileSystemLayout() throws IOException {
146     // check if the root directory exists
147     checkRootDir(this.rootdir, conf, this.fs);
148 
149     // check if temp directory exists and clean it
150     checkTempDir(this.tempdir, conf, this.fs);
151 
152     Path oldLogDir = new Path(this.rootdir, HConstants.HREGION_OLDLOGDIR_NAME);
153 
154     // Make sure the region servers can archive their old logs
155     if(!this.fs.exists(oldLogDir)) {
156       this.fs.mkdirs(oldLogDir);
157     }
158 
159     return oldLogDir;
160   }
161 
162   public FileSystem getFileSystem() {
163     return this.fs;
164   }
165 
166   /**
167    * Get the directory where old logs go
168    * @return the dir
169    */
170   public Path getOldLogDir() {
171     return this.oldLogDir;
172   }
173 
174   /**
175    * Checks to see if the file system is still accessible.
176    * If not, asks the master to abort and marks the file system as unavailable.
177    * @return false if the file system is not available
178    */
179   public boolean checkFileSystem() {
180     if (this.fsOk) {
181       try {
182         FSUtils.checkFileSystemAvailable(this.fs);
183         FSUtils.checkDfsSafeMode(this.conf);
184       } catch (IOException e) {
185         master.abort("Shutting down HBase cluster: file system not available", e);
186         this.fsOk = false;
187       }
188     }
189     return this.fsOk;
190   }
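
      /*
       * Usage sketch (illustrative): callers can treat a false return as fatal,
       * because the method has already asked the master to abort:
       *
       *   if (!masterFileSystem.checkFileSystem()) {
       *     return;  // the master is aborting; give up on the current operation
       *   }
       */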
191 
192   /**
193    * @return HBase root dir.
194    */
195   public Path getRootDir() {
196     return this.rootdir;
197   }
198 
199   /**
200    * @return HBase temp dir.
201    */
202   public Path getTempDir() {
203     return this.tempdir;
204   }
205 
206   /**
207    * @return The unique identifier generated for this cluster
208    */
209   public ClusterId getClusterId() {
210     return clusterId;
211   }
212 
213   /**
214    * Inspect the log directory to find dead servers that need recovery work.
215    * @return A set of ServerNames which aren't running but still have WAL files left in the file system
216    */
217   Set<ServerName> getFailedServersFromLogFolders() {
218     boolean retrySplitting = !conf.getBoolean("hbase.hlog.split.skip.errors",
219       HLog.SPLIT_SKIP_ERRORS_DEFAULT);
220 
221     Set<ServerName> serverNames = new HashSet<ServerName>();
222     Path logsDirPath = new Path(this.rootdir, HConstants.HREGION_LOGDIR_NAME);
223 
224     do {
225       if (master.isStopped()) {
226         LOG.warn("Master stopped while trying to get failed servers.");
227         break;
228       }
229       try {
230         if (!this.fs.exists(logsDirPath)) return serverNames;
231         FileStatus[] logFolders = FSUtils.listStatus(this.fs, logsDirPath, null);
232         // Get online servers after getting log folders to avoid log folder deletion of newly
233         // checked in region servers. See HBASE-5916.
234         Set<ServerName> onlineServers = ((HMaster) master).getServerManager().getOnlineServers()
235             .keySet();
236 
237         if (logFolders == null || logFolders.length == 0) {
238           LOG.debug("No log files to split, proceeding...");
239           return serverNames;
240         }
241         for (FileStatus status : logFolders) {
242           String sn = status.getPath().getName();
243           // truncate splitting suffix if present (for ServerName parsing)
244           if (sn.endsWith(HLog.SPLITTING_EXT)) {
245             sn = sn.substring(0, sn.length() - HLog.SPLITTING_EXT.length());
246           }
247           ServerName serverName = ServerName.parseServerName(sn);
248           if (!onlineServers.contains(serverName)) {
249             LOG.info("Log folder " + status.getPath() + " doesn't belong "
250                 + "to a known region server, splitting");
251             serverNames.add(serverName);
252           } else {
253             LOG.info("Log folder " + status.getPath() + " belongs to an existing region server");
254           }
255         }
256         retrySplitting = false;
257       } catch (IOException ioe) {
258         LOG.warn("Failed getting failed servers to be recovered.", ioe);
259         if (!checkFileSystem()) {
260           LOG.warn("Bad Filesystem, exiting");
261           Runtime.getRuntime().halt(1);
262         }
263         try {
264           if (retrySplitting) {
265             Thread.sleep(conf.getInt("hbase.hlog.split.failure.retry.interval", 30 * 1000));
266           }
267         } catch (InterruptedException e) {
268           LOG.warn("Interrupted, aborting since cannot return w/o splitting");
269           Thread.currentThread().interrupt();
270           retrySplitting = false;
271           Runtime.getRuntime().halt(1);
272         }
273       }
274     } while (retrySplitting);
275 
276     return serverNames;
277   }
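
      /*
       * Name-parsing sketch (illustrative): a WAL folder is named after its region
       * server, optionally carrying the splitting suffix when a previous split was
       * interrupted. The loop above reduces both forms to the same ServerName:
       *
       *   String dir = "rs1.example.com,60020,1400000000000" + HLog.SPLITTING_EXT;
       *   if (dir.endsWith(HLog.SPLITTING_EXT)) {
       *     dir = dir.substring(0, dir.length() - HLog.SPLITTING_EXT.length());
       *   }
       *   ServerName sn = ServerName.parseServerName(dir);
       *   // sn represents rs1.example.com,60020,1400000000000
       */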
278 
279   public void splitLog(final ServerName serverName) throws IOException {
280     Set<ServerName> serverNames = new HashSet<ServerName>();
281     serverNames.add(serverName);
282     splitLog(serverNames);
283   }
284 
285   /**
286    * Specialized method to handle the splitting of the meta HLog for a single server.
287    * @param serverName server whose meta HLog should be split
288    * @throws IOException
289    */
290   public void splitMetaLog(final ServerName serverName) throws IOException {
291     Set<ServerName> serverNames = new HashSet<ServerName>();
292     serverNames.add(serverName);
293     splitMetaLog(serverNames);
294   }
295 
296   /**
297    * Specialized method to handle the splitting of the meta HLogs for a set of servers.
298    * @param serverNames servers whose meta HLogs should be split
299    * @throws IOException
300    */
301   public void splitMetaLog(final Set<ServerName> serverNames) throws IOException {
302     splitLog(serverNames, META_FILTER);
303   }
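
      /*
       * Recovery-flow sketch (illustrative, roughly how the server shutdown handlers
       * use these methods; variable names are assumptions): when a region server dies,
       * its meta WAL (if it carried hbase:meta) is split before its user-region WALs:
       *
       *   if (carryingMeta) {
       *     masterFileSystem.splitMetaLog(deadServer);
       *   }
       *   masterFileSystem.splitLog(deadServer);
       */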
304 
305   private List<Path> getLogDirs(final Set<ServerName> serverNames) throws IOException {
306     List<Path> logDirs = new ArrayList<Path>();
307     boolean needReleaseLock = false;
308     if (!this.services.isInitialized()) {
309       // during master initialization, we could have multiple places splitting the same wal
310       this.splitLogLock.lock();
311       needReleaseLock = true;
312     }
313     try {
314       for (ServerName serverName : serverNames) {
315         Path logDir = new Path(this.rootdir, HLogUtil.getHLogDirectoryName(serverName.toString()));
316         Path splitDir = logDir.suffix(HLog.SPLITTING_EXT);
317         // Rename the directory so a rogue RS doesn't create more HLogs
318         if (fs.exists(logDir)) {
319           if (!this.fs.rename(logDir, splitDir)) {
320             throw new IOException("Failed fs.rename for log split: " + logDir);
321           }
322           logDir = splitDir;
323           LOG.debug("Renamed log directory: " + splitDir);
324         } else if (!fs.exists(splitDir)) {
325           LOG.info("Log dir for server " + serverName + " does not exist");
326           continue;
327         }
328         logDirs.add(splitDir);
329       }
330     } finally {
331       if (needReleaseLock) {
332         this.splitLogLock.unlock();
333       }
334     }
335     return logDirs;
336   }
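
      /*
       * Path sketch (illustrative): renaming a live log directory to its "-splitting"
       * twin fences off a region server that might still be writing; the directory
       * names below are placeholders, not literal layout guarantees:
       *
       *   <hbase.rootdir>/<log-dir>/rs1.example.com,60020,1400000000000
       *     -> <hbase.rootdir>/<log-dir>/rs1.example.com,60020,1400000000000-splitting
       */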
337 
338   /**
339    * Mark regions in recovering state when distributedLogReplay is set to true.
340    * @param serverNames Set of ServerNames whose WALs will be replayed in order to recover the
341    *          changes contained in them
342    * @throws IOException
343    */
344   public void prepareLogReplay(Set<ServerName> serverNames) throws IOException {
345     if (!this.distributedLogReplay) {
346       return;
347     }
348     // mark regions in recovering state
349     for (ServerName serverName : serverNames) {
350       NavigableMap<HRegionInfo, Result> regions = this.getServerUserRegions(serverName);
351       if (regions == null) {
352         continue;
353       }
354       try {
355         this.splitLogManager.markRegionsRecoveringInZK(serverName, regions.keySet());
356       } catch (KeeperException e) {
357         throw new IOException(e);
358       }
359     }
360   }
361 
362   /**
363    * Mark regions in recovering state when distributedLogReplay is set to true.
364    * @param serverName Failed region server whose WALs are to be replayed
365    * @param regions Set of regions to be recovered
366    * @throws IOException
367    */
368   public void prepareLogReplay(ServerName serverName, Set<HRegionInfo> regions) throws IOException {
369     if (!this.distributedLogReplay) {
370       return;
371     }
372     // mark regions in recovering state
373     if (regions == null || regions.isEmpty()) {
374       return;
375     }
376     try {
377       this.splitLogManager.markRegionsRecoveringInZK(serverName, regions);
378     } catch (KeeperException e) {
379       throw new IOException(e);
380     }
381   }
382 
383   public void splitLog(final Set<ServerName> serverNames) throws IOException {
384     splitLog(serverNames, NON_META_FILTER);
385   }
386 
387   /**
388    * Wrapper function on {@link SplitLogManager#removeStaleRecoveringRegionsFromZK(Set)}
389    * @param failedServers
390    * @throws KeeperException
391    */
392   void removeStaleRecoveringRegionsFromZK(final Set<ServerName> failedServers)
393       throws KeeperException, InterruptedIOException {
394     this.splitLogManager.removeStaleRecoveringRegionsFromZK(failedServers);
395   }
396 
397   /**
398    * This method is the base split method that splits HLog files matching a filter. Callers should
399    * pass the appropriate filter for meta and non-meta HLogs.
400    * @param serverNames
401    * @param filter
402    * @throws IOException
403    */
404   public void splitLog(final Set<ServerName> serverNames, PathFilter filter) throws IOException {
405     long splitTime = 0, splitLogSize = 0;
406     List<Path> logDirs = getLogDirs(serverNames);
407 
408     splitLogManager.handleDeadWorkers(serverNames);
409     splitTime = EnvironmentEdgeManager.currentTimeMillis();
410     splitLogSize = splitLogManager.splitLogDistributed(serverNames, logDirs, filter);
411     splitTime = EnvironmentEdgeManager.currentTimeMillis() - splitTime;
412 
413     if (this.metricsMasterFilesystem != null) {
414       if (filter == META_FILTER) {
415         this.metricsMasterFilesystem.addMetaWALSplit(splitTime, splitLogSize);
416       } else {
417         this.metricsMasterFilesystem.addSplit(splitTime, splitLogSize);
418       }
419     }
420   }
421 
422   /**
423    * Get the rootdir.  Make sure it is wholesome and exists before returning.
424    * @param rd
425    * @param c
426    * @param fs
427    * @return hbase.rootdir (after checking that it exists and, if needed,
428    * bootstrapping it with the necessary startup files).
429    * @throws IOException
430    */
431   @SuppressWarnings("deprecation")
432   private Path checkRootDir(final Path rd, final Configuration c,
433     final FileSystem fs)
434   throws IOException {
435     // If FS is in safe mode wait till out of it.
436     FSUtils.waitOnSafeMode(c, c.getInt(HConstants.THREAD_WAKE_FREQUENCY, 10 * 1000));
437     // Filesystem is good. Go ahead and check for hbase.rootdir.
438     try {
439       if (!fs.exists(rd)) {
440         fs.mkdirs(rd);
441         // DFS leaves safe mode with 0 DNs when there are 0 blocks.
442         // We used to handle this by checking the current DN count and waiting until
443         // it is nonzero. With security, the check for datanode count doesn't work --
444         // it is a privileged op. So instead we adopt the strategy of the jobtracker
445         // and simply retry file creation during bootstrap indefinitely. As soon as
446         // there is one datanode it will succeed. Permission problems should have
447         // already been caught by mkdirs above.
448         FSUtils.setVersion(fs, rd, c.getInt(HConstants.THREAD_WAKE_FREQUENCY,
449           10 * 1000), c.getInt(HConstants.VERSION_FILE_WRITE_ATTEMPTS,
450             HConstants.DEFAULT_VERSION_FILE_WRITE_ATTEMPTS));
451       } else {
452         if (!fs.isDirectory(rd)) {
453           throw new IllegalArgumentException(rd.toString() + " is not a directory");
454         }
455         // as above
456         FSUtils.checkVersion(fs, rd, true, c.getInt(HConstants.THREAD_WAKE_FREQUENCY,
457           10 * 1000), c.getInt(HConstants.VERSION_FILE_WRITE_ATTEMPTS,
458             HConstants.DEFAULT_VERSION_FILE_WRITE_ATTEMPTS));
459       }
460     } catch (DeserializationException de) {
461       LOG.fatal("Please fix invalid configuration for " + HConstants.HBASE_DIR, de);
462       IOException ioe = new IOException();
463       ioe.initCause(de);
464       throw ioe;
465     } catch (IllegalArgumentException iae) {
466       LOG.fatal("Please fix invalid configuration for "
467         + HConstants.HBASE_DIR + " " + rd.toString(), iae);
468       throw iae;
469     }
470     // Make sure cluster ID exists
471     if (!FSUtils.checkClusterIdExists(fs, rd, c.getInt(
472         HConstants.THREAD_WAKE_FREQUENCY, 10 * 1000))) {
473       FSUtils.setClusterId(fs, rd, new ClusterId(), c.getInt(HConstants.THREAD_WAKE_FREQUENCY, 10 * 1000));
474     }
475     clusterId = FSUtils.getClusterId(fs, rd);
476 
477     // Make sure the meta region directory exists!
478     if (!FSUtils.metaRegionExists(fs, rd)) {
479       bootstrap(rd, c);
480     } else {
481       // Migrate table descriptor files if necessary
482       org.apache.hadoop.hbase.util.FSTableDescriptorMigrationToSubdir
483         .migrateFSTableDescriptorsIfNecessary(fs, rd);
484     }
485       
486     // Create the tableinfo file for hbase:meta if not already there.
487     new FSTableDescriptors(fs, rd).createTableDescriptor(HTableDescriptor.META_TABLEDESC);
488 
489     return rd;
490   }
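
      /*
       * Layout sketch (illustrative): after checkRootDir a fresh cluster's root
       * directory contains at least the version file, the persisted cluster id and
       * the bootstrapped hbase:meta region, roughly:
       *
       *   <hbase.rootdir>/hbase.version    file-format version written by FSUtils.setVersion
       *   <hbase.rootdir>/hbase.id         ClusterId written by FSUtils.setClusterId
       *   <hbase.rootdir>/data/hbase/meta  table directory holding the bootstrapped hbase:meta region
       */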
491 
492   /**
493    * Make sure the hbase temp directory exists and is empty.
494    * NOTE that this method is only executed once just after the master becomes the active one.
495    */
496   private void checkTempDir(final Path tmpdir, final Configuration c, final FileSystem fs)
497       throws IOException {
498     // If the temp directory exists, clear its content (left over from a previous run)
499     if (fs.exists(tmpdir)) {
500       // Archive any tables found in temp; they may be left over from a failed deletion.
501       // If not, the cleaner will take care of them.
502       for (Path tabledir: FSUtils.getTableDirs(fs, tmpdir)) {
503         for (Path regiondir: FSUtils.getRegionDirs(fs, tabledir)) {
504           HFileArchiver.archiveRegion(fs, this.rootdir, tabledir, regiondir);
505         }
506       }
507       if (!fs.delete(tmpdir, true)) {
508         throw new IOException("Unable to clean the temp directory: " + tmpdir);
509       }
510     }
511 
512     // Create the temp directory
513     if (!fs.mkdirs(tmpdir)) {
514       throw new IOException("HBase temp directory '" + tmpdir + "' creation failure.");
515     }
516   }
517 
518   private static void bootstrap(final Path rd, final Configuration c)
519   throws IOException {
520     LOG.info("BOOTSTRAP: creating hbase:meta region");
521     try {
522       // While bootstrapping, make sure the block cache is off; otherwise one
523       // would be created here and would have to be cleaned up afterwards.
524       // Better not to create it in the first place: turn off block caching
525       // for bootstrap and re-enable it once the meta region has been created.
526       HRegionInfo metaHRI = new HRegionInfo(HRegionInfo.FIRST_META_REGIONINFO);
527       setInfoFamilyCachingForMeta(false);
528       HRegion meta = HRegion.createHRegion(metaHRI, rd, c,
529           HTableDescriptor.META_TABLEDESC);
530       setInfoFamilyCachingForMeta(true);
531       HRegion.closeHRegion(meta);
532     } catch (IOException e) {
533       e = RemoteExceptionHandler.checkIOException(e);
534       LOG.error("bootstrap", e);
535       throw e;
536     }
537   }
538 
539   /**
540    * Enable or disable in-memory caching and block caching for the info family of hbase:meta.
541    */
542   public static void setInfoFamilyCachingForMeta(final boolean b) {
543     for (HColumnDescriptor hcd:
544         HTableDescriptor.META_TABLEDESC.getColumnFamilies()) {
545       if (Bytes.equals(hcd.getName(), HConstants.CATALOG_FAMILY)) {
546         hcd.setBlockCacheEnabled(b);
547         hcd.setInMemory(b);
548       }
549     }
550   }
551 
552 
553   public void deleteRegion(HRegionInfo region) throws IOException {
554     HFileArchiver.archiveRegion(conf, fs, region);
555   }
556 
557   public void deleteTable(TableName tableName) throws IOException {
558     fs.delete(FSUtils.getTableDir(rootdir, tableName), true);
559   }
560 
561   /**
562    * Move the specified table to the hbase temp directory
563    * @param tableName Table name to move
564    * @return The temp location of the table moved
565    * @throws IOException in case of file-system failure
566    */
567   public Path moveTableToTemp(TableName tableName) throws IOException {
568     Path srcPath = FSUtils.getTableDir(rootdir, tableName);
569     Path tempPath = FSUtils.getTableDir(this.tempdir, tableName);
570 
571     // Ensure temp exists
572     if (!fs.exists(tempPath.getParent()) && !fs.mkdirs(tempPath.getParent())) {
573       throw new IOException("HBase temp directory '" + tempPath.getParent() + "' creation failure.");
574     }
575 
576     if (!fs.rename(srcPath, tempPath)) {
577       throw new IOException("Unable to move '" + srcPath + "' to temp '" + tempPath + "'");
578     }
579 
580     return tempPath;
581   }
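
      /*
       * Delete-flow sketch (illustrative, roughly what a delete-table handler does):
       * the table directory is first moved under the temp directory so it disappears
       * from the live layout, then its regions are archived and the temp copy removed:
       *
       *   Path tempTableDir = masterFileSystem.moveTableToTemp(tableName);
       *   // ... archive the regions under tempTableDir, then delete tempTableDir ...
       */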
582 
583   public void updateRegionInfo(HRegionInfo region) {
584     // TODO: implement this. I think this is currently broken in trunk; I don't
585     //      see this getting updated.
586     //      @see HRegion.checkRegioninfoOnFilesystem()
587   }
588 
589   public void deleteFamilyFromFS(HRegionInfo region, byte[] familyName)
590       throws IOException {
591     // archive family store files
592     Path tableDir = FSUtils.getTableDir(rootdir, region.getTable());
593     HFileArchiver.archiveFamily(fs, conf, region, tableDir, familyName);
594 
595     // delete the family folder
596     Path familyDir = new Path(tableDir,
597       new Path(region.getEncodedName(), Bytes.toString(familyName)));
598     if (!fs.delete(familyDir, true)) {
599       throw new IOException("Could not delete family "
600           + Bytes.toString(familyName) + " from FileSystem for region "
601           + region.getRegionNameAsString() + "(" + region.getEncodedName()
602           + ")");
603     }
604   }
605 
606   public void stop() {
607     if (splitLogManager != null) {
608       this.splitLogManager.stop();
609     }
610   }
611 
612   /**
613    * Delete a column family from a table.
614    * @param tableName table from which the family is removed
615    * @param familyName name of the column family to remove
616    * @return Modified HTableDescriptor with requested column deleted.
617    * @throws IOException
618    */
619   public HTableDescriptor deleteColumn(TableName tableName, byte[] familyName)
620       throws IOException {
621     LOG.info("DeleteColumn. Table = " + tableName
622         + " family = " + Bytes.toString(familyName));
623     HTableDescriptor htd = this.services.getTableDescriptors().get(tableName);
624     htd.removeFamily(familyName);
625     this.services.getTableDescriptors().add(htd);
626     return htd;
627   }
628 
629   /**
630    * Modify a column family of a table.
631    * @param tableName table whose family is modified
632    * @param hcd HColumnDescriptor describing the modified family
633    * @return Modified HTableDescriptor with the column modified.
634    * @throws IOException
635    */
636   public HTableDescriptor modifyColumn(TableName tableName, HColumnDescriptor hcd)
637       throws IOException {
638     LOG.info("ModifyColumn. Table = " + tableName
639         + " HCD = " + hcd.toString());
640 
641     HTableDescriptor htd = this.services.getTableDescriptors().get(tableName);
642     byte [] familyName = hcd.getName();
643     if(!htd.hasFamily(familyName)) {
644       throw new InvalidFamilyOperationException("Family '" +
645         Bytes.toString(familyName) + "' does not exist so it cannot be modified");
646     }
647     htd.addFamily(hcd);
648     this.services.getTableDescriptors().add(htd);
649     return htd;
650   }
651 
652   /**
653    * Add a column family to a table.
654    * @param tableName table to which the family is added
655    * @param hcd HColumnDescriptor describing the new family
656    * @return Modified HTableDescriptor with new column added.
657    * @throws IOException
658    */
659   public HTableDescriptor addColumn(TableName tableName, HColumnDescriptor hcd)
660       throws IOException {
661     LOG.info("AddColumn. Table = " + tableName + " HCD = " +
662       hcd.toString());
663     HTableDescriptor htd = this.services.getTableDescriptors().get(tableName);
664     if (htd == null) {
665       throw new InvalidFamilyOperationException("Family '" +
666         hcd.getNameAsString() + "' cannot be added as HTD is null");
667     }
668     htd.addFamily(hcd);
669     this.services.getTableDescriptors().add(htd);
670     return htd;
671   }
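
      /*
       * Schema-change sketch (illustrative): these helpers only rewrite the table
       * descriptor on the file system; the calling handler is still responsible for
       * reopening regions so the change becomes visible to region servers:
       *
       *   HColumnDescriptor hcd = new HColumnDescriptor("cf");
       *   hcd.setMaxVersions(5);
       *   masterFileSystem.addColumn(tableName, hcd);     // add a new family
       *   masterFileSystem.modifyColumn(tableName, hcd);  // adjust an existing one
       */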
672 
673   private NavigableMap<HRegionInfo, Result> getServerUserRegions(ServerName serverName)
674       throws IOException {
675     if (!this.master.isStopped()) {
676       try {
677         this.master.getCatalogTracker().waitForMeta();
678         return MetaReader.getServerUserRegions(this.master.getCatalogTracker(), serverName);
679       } catch (InterruptedException e) {
680         throw (InterruptedIOException)new InterruptedIOException().initCause(e);
681       }
682     }
683     return null;
684   }
685 }