View Javadoc

1   /**
2    * Licensed to the Apache Software Foundation (ASF) under one
3    * or more contributor license agreements.  See the NOTICE file
4    * distributed with this work for additional information
5    * regarding copyright ownership.  The ASF licenses this file
6    * to you under the Apache License, Version 2.0 (the
7    * "License"); you may not use this file except in compliance
8    * with the License.  You may obtain a copy of the License at
9    *
10   *     http://www.apache.org/licenses/LICENSE-2.0
11   *
12   * Unless required by applicable law or agreed to in writing, software
13   * distributed under the License is distributed on an "AS IS" BASIS,
14   * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
15   * See the License for the specific language governing permissions and
16   * limitations under the License.
17   */
18  package org.apache.hadoop.hbase.util;
19  
20  import java.io.FileNotFoundException;
21  import java.io.IOException;
22  import java.util.Arrays;
23  import java.util.Comparator;
24  import java.util.List;
25  import java.util.Map;
26  import java.util.TreeMap;
27  import java.util.concurrent.ConcurrentHashMap;
28  import java.util.regex.Matcher;
29  import java.util.regex.Pattern;
30  
31  import org.apache.commons.lang.NotImplementedException;
32  import org.apache.commons.logging.Log;
33  import org.apache.commons.logging.LogFactory;
34  import org.apache.hadoop.conf.Configuration;
35  import org.apache.hadoop.fs.FSDataInputStream;
36  import org.apache.hadoop.fs.FSDataOutputStream;
37  import org.apache.hadoop.fs.FileStatus;
38  import org.apache.hadoop.fs.FileSystem;
39  import org.apache.hadoop.fs.Path;
40  import org.apache.hadoop.fs.PathFilter;
41  import org.apache.hadoop.hbase.HBaseFileSystem;
42  import org.apache.hadoop.hbase.HConstants;
43  import org.apache.hadoop.hbase.HTableDescriptor;
44  import org.apache.hadoop.hbase.TableDescriptors;
45  import org.apache.hadoop.hbase.TableInfoMissingException;
46  
47  /**
48   * Implementation of {@link TableDescriptors} that reads descriptors from the
49   * passed filesystem.  It expects descriptors to be in a file under the
 * table's directory in FS.  Can be read-only -- i.e. it does not modify
 * the filesystem -- or it can be read-write.
52   * 
53   * <p>Also has utility for keeping up the table descriptors tableinfo file.
54   * The table schema file is kept under the table directory in the filesystem.
55   * It has a {@link #TABLEINFO_NAME} prefix and then a suffix that is the
56   * edit sequenceid: e.g. <code>.tableinfo.0000000003</code>.  This sequenceid
57   * is always increasing.  It starts at zero.  The table schema file with the
58   * highest sequenceid has the most recent schema edit. Usually there is one file
59   * only, the most recent but there may be short periods where there are more
60   * than one file. Old files are eventually cleaned.  Presumption is that there
61   * will not be lots of concurrent clients making table schema edits.  If so,
62   * the below needs a bit of a reworking and perhaps some supporting api in hdfs.
63   */
64  public class FSTableDescriptors implements TableDescriptors {
65    private static final Log LOG = LogFactory.getLog(FSTableDescriptors.class);
66    private final FileSystem fs;
67    private final Path rootdir;
68    private final boolean fsreadonly;
69    long cachehits = 0;
70    long invocations = 0;
71  
72    /** The file name used to store HTD in HDFS  */
73    public static final String TABLEINFO_NAME = ".tableinfo";
74  
75    // This cache does not age out the old stuff.  Thinking is that the amount
76    // of data we keep up in here is so small, no need to do occasional purge.
77    // TODO.
78    private final Map<String, TableDescriptorModtime> cache =
79      new ConcurrentHashMap<String, TableDescriptorModtime>();
80  
81    /**
82     * Data structure to cache a table descriptor, the time it was modified,
83     * and the time the table directory was modified.
84     */
85    static class TableDescriptorModtime {
86      private final HTableDescriptor descriptor;
87      private final long modtime;
88      private final long dirmodtime;
89  
90      TableDescriptorModtime(final long modtime, final long dirmodtime, final HTableDescriptor htd) {
91        this.descriptor = htd;
92        this.modtime = modtime;
93        this.dirmodtime = dirmodtime;
94      }
95  
96      long getModtime() {
97        return this.modtime;
98      }
99      
100     long getDirModtime() {
101       return this.dirmodtime;
102     }
103 
104     HTableDescriptor getTableDescriptor() {
105       return this.descriptor;
106     }
107   }
108 
109   public FSTableDescriptors(final FileSystem fs, final Path rootdir) {
110     this(fs, rootdir, false);
111   }
112 
113   /**
114    * @param fs
115    * @param rootdir
116    * @param fsreadOnly True if we are read-only when it comes to filesystem
117    * operations; i.e. on remove, we do not do delete in fs.
118    */
119   public FSTableDescriptors(final FileSystem fs, final Path rootdir,
120       final boolean fsreadOnly) {
121     super();
122     this.fs = fs;
123     this.rootdir = rootdir;
124     this.fsreadonly = fsreadOnly;
125   }
126 
127   /* (non-Javadoc)
128    * @see org.apache.hadoop.hbase.TableDescriptors#getHTableDescriptor(java.lang.String)
129    */
130   @Override
131   public HTableDescriptor get(final byte [] tablename)
132   throws IOException {
133     return get(Bytes.toString(tablename));
134   }
135 
136   /* (non-Javadoc)
137    * @see org.apache.hadoop.hbase.TableDescriptors#getTableDescriptor(byte[])
138    */
139   @Override
140   public HTableDescriptor get(final String tablename)
141   throws IOException {
142     invocations++;
143     if (HTableDescriptor.ROOT_TABLEDESC.getNameAsString().equals(tablename)) {
144       cachehits++;
145       return HTableDescriptor.ROOT_TABLEDESC;
146     }
147     if (HTableDescriptor.META_TABLEDESC.getNameAsString().equals(tablename)) {
148       cachehits++;
149       return HTableDescriptor.META_TABLEDESC;
150     }
151     // .META. and -ROOT- is already handled. If some one tries to get the descriptor for
152     // .logs, .oldlogs or .corrupt throw an exception.
153     if (HConstants.HBASE_NON_USER_TABLE_DIRS.contains(tablename)) {
154        throw new IOException("No descriptor found for table = " + tablename);
155     }
156 
157     // Look in cache of descriptors.
158     TableDescriptorModtime cachedtdm = this.cache.get(tablename);
159 
160     if (cachedtdm != null) {
161       // Check mod time has not changed (this is trip to NN).
162       // First check directory modtime as it doesn't require a scan of the full table directory
163       long tableDirModtime = getTableDirModtime(fs, this.rootdir, tablename);
164       boolean cachehit = false;
165       if (tableDirModtime <= cachedtdm.getDirModtime()) {
166         // table dir not changed since our cached entry
167         cachehit = true;
168       } else if (getTableInfoModtime(this.fs, this.rootdir, tablename) <= cachedtdm.getModtime()) {
169         // the table dir has changed (perhaps a region split) but the info file itself has not
170         // so the cached descriptor is good, we just need to update the entry
171         this.cache.put(tablename, new TableDescriptorModtime(cachedtdm.getModtime(),
172             tableDirModtime, cachedtdm.getTableDescriptor()));
173         cachehit = true;
174       }  // else table info file has been changed, need to read it 
175       if (cachehit) {
176         cachehits++;
177         return cachedtdm.getTableDescriptor();
178       }
179    }
180     
181     TableDescriptorModtime tdmt = null;
182     try {
183       tdmt = getTableDescriptorModtime(this.fs, this.rootdir, tablename, true);
184     } catch (NullPointerException e) {
185       LOG.debug("Exception during readTableDecriptor. Current table name = "
186           + tablename, e);
187     } catch (IOException ioe) {
188       LOG.debug("Exception during readTableDecriptor. Current table name = "
189           + tablename, ioe);
190     }
191     
192     if (tdmt != null) {
193       this.cache.put(tablename, tdmt);
194     }
195     return tdmt == null ? null : tdmt.getTableDescriptor();
196   }
197 
198   /* (non-Javadoc)
199    * @see org.apache.hadoop.hbase.TableDescriptors#getTableDescriptors(org.apache.hadoop.fs.FileSystem, org.apache.hadoop.fs.Path)
200    */
201   @Override
202   public Map<String, HTableDescriptor> getAll()
203   throws IOException {
204     Map<String, HTableDescriptor> htds = new TreeMap<String, HTableDescriptor>();
205     List<Path> tableDirs = FSUtils.getTableDirs(fs, rootdir);
206     for (Path d: tableDirs) {
207       HTableDescriptor htd = null;
208       try {
209 
210         htd = get(d.getName());
211       } catch (FileNotFoundException fnfe) {
212         // inability of retrieving one HTD shouldn't stop getting the remaining
213         LOG.warn("Trouble retrieving htd", fnfe);
214       }
215       if (htd == null) continue;
216       htds.put(d.getName(), htd);
217     }
218     return htds;
219   }
220 
221   @Override
222   public void add(HTableDescriptor htd) throws IOException {
223     if (Bytes.equals(HConstants.ROOT_TABLE_NAME, htd.getName())) {
224       throw new NotImplementedException();
225     }
226     if (Bytes.equals(HConstants.META_TABLE_NAME, htd.getName())) {
227       throw new NotImplementedException();
228     }
229     if (HConstants.HBASE_NON_USER_TABLE_DIRS.contains(htd.getNameAsString())) {
230       throw new NotImplementedException();
231     }
232     if (!this.fsreadonly) updateHTableDescriptor(this.fs, this.rootdir, htd);
233     String tableName = htd.getNameAsString();
234     long modtime = getTableInfoModtime(this.fs, this.rootdir, tableName);
235     long dirmodtime = getTableDirModtime(this.fs, this.rootdir, tableName);
236     this.cache.put(tableName, new TableDescriptorModtime(modtime, dirmodtime, htd));
237   }
238 
239   @Override
240   public HTableDescriptor remove(final String tablename)
241   throws IOException {
242     if (!this.fsreadonly) {
243       Path tabledir = FSUtils.getTablePath(this.rootdir, tablename);
244       if (this.fs.exists(tabledir)) {
245         if (!HBaseFileSystem.deleteDirFromFileSystem(fs, tabledir)) {
246           throw new IOException("Failed delete of " + tabledir.toString());
247         }
248       }
249     }
250     TableDescriptorModtime tdm = this.cache.remove(tablename);
251     return tdm == null ? null : tdm.getTableDescriptor();
252   }
253 
254   /**
255    * Checks if <code>.tableinfo<code> exists for given table
256    * 
257    * @param fs file system
258    * @param rootdir root directory of HBase installation
259    * @param tableName name of table
260    * @return true if exists
261    * @throws IOException
262    */
263   public static boolean isTableInfoExists(FileSystem fs, Path rootdir,
264       String tableName) throws IOException {
265     FileStatus status = getTableInfoPath(fs, rootdir, tableName);
266     return status == null? false: fs.exists(status.getPath());
267   }
268 
269   private static FileStatus getTableInfoPath(final FileSystem fs,
270       final Path rootdir, final String tableName)
271   throws IOException {
272     Path tabledir = FSUtils.getTablePath(rootdir, tableName);
273     return getTableInfoPath(fs, tabledir);
274   }
275 
276   /**
277    * Looks under the table directory in the filesystem for files with a
278    * {@link #TABLEINFO_NAME} prefix.  Returns reference to the 'latest' instance.
279    * @param fs
280    * @param tabledir
281    * @return The 'current' tableinfo file.
282    * @throws IOException
283    */
284   public static FileStatus getTableInfoPath(final FileSystem fs,
285       final Path tabledir)
286   throws IOException {
287     FileStatus [] status = FSUtils.listStatus(fs, tabledir, new PathFilter() {
288       @Override
289       public boolean accept(Path p) {
290         // Accept any file that starts with TABLEINFO_NAME
291         return p.getName().startsWith(TABLEINFO_NAME);
292       }
293     });
294     if (status == null || status.length < 1) return null;
295     Arrays.sort(status, new FileStatusFileNameComparator());
296     if (status.length > 1) {
297       // Clean away old versions of .tableinfo
298       for (int i = 1; i < status.length; i++) {
299         Path p = status[i].getPath();
300         // Clean up old versions
301         if (!HBaseFileSystem.deleteFileFromFileSystem(fs, p)) {
302           LOG.warn("Failed cleanup of " + status);
303         } else {
304           LOG.debug("Cleaned up old tableinfo file " + p);
305         }
306       }
307     }
308     return status[0];
309   }
310 
311   /**
312    * Compare {@link FileStatus} instances by {@link Path#getName()}.
313    * Returns in reverse order.
314    */
315   static class FileStatusFileNameComparator
316   implements Comparator<FileStatus> {
317     @Override
318     public int compare(FileStatus left, FileStatus right) {
319       return -left.compareTo(right);
320     }
321   }
322 
323   /**
324    * Width of the sequenceid that is a suffix on a tableinfo file.
325    */
326   static final int WIDTH_OF_SEQUENCE_ID = 10;
327 
328   /*
329    * @param number Number to use as suffix.
330    * @return Returns zero-prefixed 5-byte wide decimal version of passed
331    * number (Does absolute in case number is negative).
332    */
333   static String formatTableInfoSequenceId(final int number) {
334     byte [] b = new byte[WIDTH_OF_SEQUENCE_ID];
335     int d = Math.abs(number);
336     for (int i = b.length - 1; i >= 0; i--) {
337       b[i] = (byte)((d % 10) + '0');
338       d /= 10;
339     }
340     return Bytes.toString(b);
341   }
342 
343   /**
344    * Regex to eat up sequenceid suffix on a .tableinfo file.
345    * Use regex because may encounter oldstyle .tableinfos where there is no
346    * sequenceid on the end.
347    */
348   private static final Pattern SUFFIX =
349     Pattern.compile(TABLEINFO_NAME + "(\\.([0-9]{" + WIDTH_OF_SEQUENCE_ID + "}))?$");
350 
351 
352   /**
353    * @param p Path to a <code>.tableinfo</code> file.
354    * @return The current editid or 0 if none found.
355    */
356   static int getTableInfoSequenceid(final Path p) {
357     if (p == null) return 0;
358     Matcher m = SUFFIX.matcher(p.getName());
359     if (!m.matches()) throw new IllegalArgumentException(p.toString());
360     String suffix = m.group(2);
361     if (suffix == null || suffix.length() <= 0) return 0;
362     return Integer.parseInt(m.group(2));
363   }
364 
365   /**
366    * @param tabledir
367    * @param sequenceid
368    * @return Name of tableinfo file.
369    */
370   static Path getTableInfoFileName(final Path tabledir, final int sequenceid) {
371     return new Path(tabledir,
372       TABLEINFO_NAME + "." + formatTableInfoSequenceId(sequenceid));
373   }
374 
375   static long getTableDirModtime(final FileSystem fs, final Path rootdir,
376       final String tableName)
377   throws IOException {
378     Path tabledir = FSUtils.getTablePath(rootdir, tableName);
379     FileStatus status = fs.getFileStatus(tabledir);
380     return status == null? 0: status.getModificationTime();
381   }
382   
383   /**
384    * @param fs
385    * @param rootdir
386    * @param tableName
387    * @return Modification time for the table {@link #TABLEINFO_NAME} file
388    * or <code>0</code> if no tableinfo file found.
389    * @throws IOException
390    */
391   static long getTableInfoModtime(final FileSystem fs, final Path rootdir,
392       final String tableName)
393   throws IOException {
394     FileStatus status = getTableInfoPath(fs, rootdir, tableName);
395     return status == null? 0: status.getModificationTime();
396   }
397 
398   /**
399    * Returns the latest table descriptor for the given table directly from the file system
400    * if it exists, bypassing the local cache.
401    * Returns null if it's not found.
402    */
403   public static HTableDescriptor getTableDescriptorFromFs(FileSystem fs,
404       Path hbaseRootDir, String tableName) throws IOException {
405     // ignore both -ROOT- and .META. tables
406     if (Bytes.compareTo(Bytes.toBytes(tableName), HConstants.ROOT_TABLE_NAME) == 0
407         || Bytes.compareTo(Bytes.toBytes(tableName), HConstants.META_TABLE_NAME) == 0) {
408       return null;
409     }
410     Path tableDir = FSUtils.getTablePath(hbaseRootDir, tableName);
411     return getTableDescriptorFromFs(fs, tableDir);
412   }
413 
414   /**
415    * Returns the latest table descriptor for the table located at the given directory
416    * directly from the file system if it exists.
417    * @throws TableInfoMissingException if there is no descriptor
418    */
419   public static HTableDescriptor getTableDescriptorFromFs(FileSystem fs, Path tableDir)
420   throws IOException {
421     FileStatus status = getTableInfoPath(fs, tableDir);
422     if (status == null) {
423       throw new TableInfoMissingException("No table descriptor file under " + tableDir);
424     }
425     FSDataInputStream fsDataInputStream = fs.open(status.getPath());
426     HTableDescriptor hTableDescriptor = null;
427     try {
428       hTableDescriptor = new HTableDescriptor();
429       hTableDescriptor.readFields(fsDataInputStream);
430     } finally {
431       fsDataInputStream.close();
432     }
433     return hTableDescriptor;
434   }
435 
436   /**
437    * Get HTD from HDFS.
438    * @param fs
439    * @param hbaseRootDir
440    * @param tableName
441    * @return Descriptor or null if none found.
442    * @throws IOException
443    */
444   public static HTableDescriptor getTableDescriptor(FileSystem fs,
445       Path hbaseRootDir, byte[] tableName)
446   throws IOException {
447      HTableDescriptor htd = null;
448      try {
449        TableDescriptorModtime tdmt =
450          getTableDescriptorModtime(fs, hbaseRootDir, Bytes.toString(tableName), false);
451        htd = tdmt == null ? null : tdmt.getTableDescriptor();
452      } catch (NullPointerException e) {
453        LOG.debug("Exception during readTableDecriptor. Current table name = "
454            + Bytes.toString(tableName), e);
455      }
456      return htd;
457   }
458 
459   static HTableDescriptor getTableDescriptor(FileSystem fs,
460       Path hbaseRootDir, String tableName) throws NullPointerException, IOException {
461     TableDescriptorModtime tdmt = getTableDescriptorModtime(fs, hbaseRootDir, tableName, false);
462     return tdmt == null ? null : tdmt.getTableDescriptor();
463   }
464 
465   static TableDescriptorModtime getTableDescriptorModtime(FileSystem fs,
466       Path hbaseRootDir, String tableName, boolean readDirModtime)
467   throws NullPointerException, IOException{
468     // ignore both -ROOT- and .META. tables
469     if (Bytes.compareTo(Bytes.toBytes(tableName), HConstants.ROOT_TABLE_NAME) == 0
470         || Bytes.compareTo(Bytes.toBytes(tableName), HConstants.META_TABLE_NAME) == 0) {
471       return null;
472     }
473     return getTableDescriptorModtime(fs, FSUtils.getTablePath(hbaseRootDir, tableName), readDirModtime);
474   }
475 
476   /**
477    * @param fs filesystem
478    * @param tableDir path to table directory
479    * @param readDirModtime true if dirmodtime should be read also
480    * @return TableDescriptorModtime or null if no table descriptor was found
481    * at the specified path
482    * @throws IOException
483    */
484   static TableDescriptorModtime getTableDescriptorModtime(FileSystem fs, Path tableDir, boolean readDirModtime)
485   throws NullPointerException, IOException {
486     if (tableDir == null) throw new NullPointerException();
487     FileStatus status = getTableInfoPath(fs, tableDir);
488     if (status == null) {
489       return null;
490     }
491     FSDataInputStream fsDataInputStream = fs.open(status.getPath());
492     HTableDescriptor hTableDescriptor = null;
493     try {
494       hTableDescriptor = new HTableDescriptor();
495       hTableDescriptor.readFields(fsDataInputStream);
496     } finally {
497       fsDataInputStream.close();
498     }
499     long dirModtime = 0;
500     if (readDirModtime) {
501       dirModtime = fs.getFileStatus(tableDir).getModificationTime();
502     }
503     return new TableDescriptorModtime(status.getModificationTime(), dirModtime, hTableDescriptor);
504   }
505   
506   public static HTableDescriptor getTableDescriptor(FileSystem fs, Path tableDir)
507   throws IOException, NullPointerException {
508     TableDescriptorModtime tdmt = getTableDescriptorModtime(fs, tableDir, false);
509     return tdmt == null? null: tdmt.getTableDescriptor();
510   }
511  
512 
513   /**
514    * Update table descriptor
515    * @param fs
516    * @param conf
517    * @param hTableDescriptor
518    * @return New tableinfo or null if we failed update.
519    * @throws IOException Thrown if failed update.
520    */
521   static Path updateHTableDescriptor(FileSystem fs, Path rootdir,
522       HTableDescriptor hTableDescriptor)
523   throws IOException {
524     Path tableDir = FSUtils.getTablePath(rootdir, hTableDescriptor.getName());
525     Path p = writeTableDescriptor(fs, hTableDescriptor, tableDir,
526       getTableInfoPath(fs, tableDir));
527     if (p == null) throw new IOException("Failed update");
528     LOG.info("Updated tableinfo=" + p);
529     return p;
530   }
531 
532   /**
533    * Deletes a table's directory from the file system if exists. Used in unit
534    * tests.
535    */
536   public static void deleteTableDescriptorIfExists(String tableName,
537       Configuration conf) throws IOException {
538     FileSystem fs = FSUtils.getCurrentFileSystem(conf);
539     FileStatus status = getTableInfoPath(fs, FSUtils.getRootDir(conf), tableName);
540     // The below deleteDirectory works for either file or directory.
541     if (status != null && fs.exists(status.getPath())) {
542       FSUtils.deleteDirectory(fs, status.getPath());
543     }
544   }
545 
546   /**
547    * @param fs
548    * @param hTableDescriptor
549    * @param tableDir
550    * @param status
551    * @return Descriptor file or null if we failed write.
552    * @throws IOException 
553    */
554   private static Path writeTableDescriptor(final FileSystem fs,
555       final HTableDescriptor hTableDescriptor, final Path tableDir,
556       final FileStatus status)
557   throws IOException {
558     // Get temporary dir into which we'll first write a file to avoid
559     // half-written file phenomeon.
560     Path tmpTableDir = new Path(tableDir, ".tmp");
561     // What is current sequenceid?  We read the current sequenceid from
562     // the current file.  After we read it, another thread could come in and
563     // compete with us writing out next version of file.  The below retries
564     // should help in this case some but its hard to do guarantees in face of
565     // concurrent schema edits.
566     int currentSequenceid =
567       status == null? 0: getTableInfoSequenceid(status.getPath());
568     int sequenceid = currentSequenceid;
569     // Put arbitrary upperbound on how often we retry
570     int retries = 10;
571     int retrymax = currentSequenceid + retries;
572     Path tableInfoPath = null;
573     do {
574       sequenceid += 1;
575       Path p = getTableInfoFileName(tmpTableDir, sequenceid);
576       if (fs.exists(p)) {
577         LOG.debug(p + " exists; retrying up to " + retries + " times");
578         continue;
579       }
580       try {
581         writeHTD(fs, p, hTableDescriptor);
582         tableInfoPath = getTableInfoFileName(tableDir, sequenceid);
583         if (!HBaseFileSystem.renameDirForFileSystem(fs, p, tableInfoPath)) {
584           throw new IOException("Failed rename of " + p + " to " + tableInfoPath);
585         }
586       } catch (IOException ioe) {
587         // Presume clash of names or something; go around again.
588         LOG.debug("Failed write and/or rename; retrying", ioe);
589         if (!FSUtils.deleteDirectory(fs, p)) {
590           LOG.warn("Failed cleanup of " + p);
591         }
592         tableInfoPath = null;
593         continue;
594       }
595       // Cleanup old schema file.
596       if (status != null) {
597         if (!FSUtils.deleteDirectory(fs, status.getPath())) {
598           LOG.warn("Failed delete of " + status.getPath() + "; continuing");
599         }
600       }
601       break;
602     } while (sequenceid < retrymax);
603     return tableInfoPath;
604   }
605 
606   private static void writeHTD(final FileSystem fs, final Path p,
607       final HTableDescriptor htd)
608   throws IOException {
609     FSDataOutputStream out = HBaseFileSystem.createPathOnFileSystem(fs, p, false);
610     try {
611       htd.write(out);
612       out.write('\n');
613       out.write('\n');
614       out.write(Bytes.toBytes(htd.toString()));
615     } finally {
616       out.close();
617     }
618   }
619 
620   /**
621    * Create new HTableDescriptor in HDFS. Happens when we are creating table.
622    * 
623    * @param htableDescriptor
624    * @param conf
625    */
626   public static boolean createTableDescriptor(final HTableDescriptor htableDescriptor,
627       Configuration conf)
628   throws IOException {
629     return createTableDescriptor(htableDescriptor, conf, false);
630   }
631 
632   /**
633    * Create new HTableDescriptor in HDFS. Happens when we are creating table. If
634    * forceCreation is true then even if previous table descriptor is present it
635    * will be overwritten
636    * 
637    * @param htableDescriptor
638    * @param conf
639    * @param forceCreation True if we are to overwrite existing file.
640    */
641   static boolean createTableDescriptor(final HTableDescriptor htableDescriptor,
642       final Configuration conf, boolean forceCreation)
643   throws IOException {
644     FileSystem fs = FSUtils.getCurrentFileSystem(conf);
645     return createTableDescriptor(fs, FSUtils.getRootDir(conf), htableDescriptor,
646         forceCreation);
647   }
648 
649   /**
650    * Create new HTableDescriptor in HDFS. Happens when we are creating table.
651    * Used by tests.
652    * @param fs
653    * @param htableDescriptor
654    * @param rootdir
655    */
656   public static boolean createTableDescriptor(FileSystem fs, Path rootdir,
657       HTableDescriptor htableDescriptor)
658   throws IOException {
659     return createTableDescriptor(fs, rootdir, htableDescriptor, false);
660   }
661 
662   /**
663    * Create new HTableDescriptor in HDFS. Happens when we are creating table. If
664    * forceCreation is true then even if previous table descriptor is present it
665    * will be overwritten
666    * 
667    * @param fs
668    * @param htableDescriptor
669    * @param rootdir
670    * @param forceCreation
671    * @return True if we successfully created file.
672    */
673   public static boolean createTableDescriptor(FileSystem fs, Path rootdir,
674       HTableDescriptor htableDescriptor, boolean forceCreation)
675   throws IOException {
676     Path tabledir = FSUtils.getTablePath(rootdir, htableDescriptor.getNameAsString());
677     return createTableDescriptorForTableDirectory(fs, tabledir, htableDescriptor, forceCreation);
678   }
679 
680   /**
681    * Create a new HTableDescriptor in HDFS in the specified table directory. Happens when we create
682    * a new table or snapshot a table.
683    * @param fs filesystem where the descriptor should be written
684    * @param tabledir directory under which we should write the file
685    * @param htableDescriptor description of the table to write
686    * @param forceCreation if <tt>true</tt>,then even if previous table descriptor is present it will
687    *          be overwritten
688    * @return <tt>true</tt> if the we successfully created the file, <tt>false</tt> if the file
689    *         already exists and we weren't forcing the descriptor creation.
690    * @throws IOException if a filesystem error occurs
691    */
692   public static boolean createTableDescriptorForTableDirectory(FileSystem fs, Path tabledir,
693       HTableDescriptor htableDescriptor, boolean forceCreation) throws IOException {
694     FileStatus status = getTableInfoPath(fs, tabledir);
695     if (status != null) {
696       LOG.info("Current tableInfoPath = " + status.getPath());
697       if (!forceCreation) {
698         if (fs.exists(status.getPath()) && status.getLen() > 0) {
699           LOG.info("TableInfo already exists.. Skipping creation");
700           return false;
701         }
702       }
703     }
704     Path p = writeTableDescriptor(fs, htableDescriptor, tabledir, status);
705     return p != null;
706   }
707 }