View Javadoc

1   /**
2    * Licensed to the Apache Software Foundation (ASF) under one
3    * or more contributor license agreements.  See the NOTICE file
4    * distributed with this work for additional information
5    * regarding copyright ownership.  The ASF licenses this file
6    * to you under the Apache License, Version 2.0 (the
7    * "License"); you may not use this file except in compliance
8    * with the License.  You may obtain a copy of the License at
9    *
10   *     http://www.apache.org/licenses/LICENSE-2.0
11   *
12   * Unless required by applicable law or agreed to in writing, software
13   * distributed under the License is distributed on an "AS IS" BASIS,
14   * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
15   * See the License for the specific language governing permissions and
16   * limitations under the License.
17   */
18  package org.apache.hadoop.hbase.backup;
19  
20  import java.io.IOException;
21  import java.util.ArrayList;
22  import java.util.Arrays;
23  import java.util.Collection;
24  import java.util.Collections;
25  import java.util.List;
26  
27  import org.apache.commons.logging.Log;
28  import org.apache.commons.logging.LogFactory;
29  import org.apache.hadoop.conf.Configuration;
30  import org.apache.hadoop.fs.FileStatus;
31  import org.apache.hadoop.fs.FileSystem;
32  import org.apache.hadoop.fs.Path;
33  import org.apache.hadoop.fs.PathFilter;
34  import org.apache.hadoop.hbase.HBaseFileSystem;
35  import org.apache.hadoop.hbase.HRegionInfo;
36  import org.apache.hadoop.hbase.HTableDescriptor;
37  import org.apache.hadoop.hbase.regionserver.HRegion;
38  import org.apache.hadoop.hbase.regionserver.StoreFile;
39  import org.apache.hadoop.hbase.util.Bytes;
40  import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
41  import org.apache.hadoop.hbase.util.FSUtils;
42  import org.apache.hadoop.hbase.util.HFileArchiveUtil;
43  import org.apache.hadoop.io.MultipleIOException;
44  
45  import com.google.common.base.Function;
46  import com.google.common.base.Preconditions;
47  import com.google.common.collect.Collections2;
48  import com.google.common.collect.Lists;
49  
/**
 * Utility class to handle the removal of HFiles (or the respective {@link StoreFile StoreFiles})
 * for a HRegion from the {@link FileSystem}. The hfiles will be archived or deleted, depending on
 * the state of the system.
 */
public class HFileArchiver {
  private static final Log LOG = LogFactory.getLog(HFileArchiver.class);
  // Separator placed between an archived file's name and the timestamp suffix that is
  // appended when a name collision must be resolved in the archive directory.
  private static final String SEPARATOR = ".";

  /** Number of retries in case of fs operation failure */
  private static final int DEFAULT_RETRIES_NUMBER = 6;

  private HFileArchiver() {
    // hidden ctor since this is just a util
  }
65  
66    /**
67     * Cleans up all the files for a HRegion by archiving the HFiles to the
68     * archive directory
69     * @param conf the configuration to use
70     * @param fs the file system object
71     * @param info HRegionInfo for region to be deleted
72     * @throws IOException
73     */
74    public static void archiveRegion(Configuration conf, FileSystem fs, HRegionInfo info)
75        throws IOException {
76      Path rootDir = FSUtils.getRootDir(conf);
77      archiveRegion(fs, rootDir, HTableDescriptor.getTableDir(rootDir, info.getTableName()),
78        HRegion.getRegionDir(rootDir, info));
79    }
80  
81    /**
82     * Remove an entire region from the table directory via archiving the region's hfiles.
83     * @param fs {@link FileSystem} from which to remove the region
84     * @param rootdir {@link Path} to the root directory where hbase files are stored (for building
85     *          the archive path)
86     * @param tableDir {@link Path} to where the table is being stored (for building the archive path)
87     * @param regionDir {@link Path} to where a region is being stored (for building the archive path)
88     * @return <tt>true</tt> if the region was sucessfully deleted. <tt>false</tt> if the filesystem
89     *         operations could not complete.
90     * @throws IOException if the request cannot be completed
91     */
92    public static boolean archiveRegion(FileSystem fs, Path rootdir, Path tableDir, Path regionDir)
93        throws IOException {
94      if (LOG.isDebugEnabled()) {
95        LOG.debug("ARCHIVING region " + regionDir.toString());
96      }
97  
98      // otherwise, we archive the files
99      // make sure we can archive
100     if (tableDir == null || regionDir == null) {
101       LOG.error("No archive directory could be found because tabledir (" + tableDir
102           + ") or regiondir (" + regionDir + "was null. Deleting files instead.");
103       deleteRegionWithoutArchiving(fs, regionDir);
104       // we should have archived, but failed to. Doesn't matter if we deleted
105       // the archived files correctly or not.
106       return false;
107     }
108 
109     // make sure the regiondir lives under the tabledir
110     Preconditions.checkArgument(regionDir.toString().startsWith(tableDir.toString()));
111     Path regionArchiveDir = HFileArchiveUtil.getRegionArchiveDir(rootdir, tableDir, regionDir);
112 
113     LOG.debug("Have an archive directory, preparing to move files");
114     FileStatusConverter getAsFile = new FileStatusConverter(fs);
115     // otherwise, we attempt to archive the store files
116 
117     // build collection of just the store directories to archive
118     Collection<File> toArchive = new ArrayList<File>();
119     final PathFilter dirFilter = new FSUtils.DirFilter(fs);
120     PathFilter nonHidden = new PathFilter() {
121       @Override
122       public boolean accept(Path file) {
123         return dirFilter.accept(file) && !file.getName().toString().startsWith(".");
124       }
125     };
126     FileStatus[] storeDirs = FSUtils.listStatus(fs, regionDir, nonHidden);
127     // if there no files, we can just delete the directory and return;
128     if (storeDirs == null) {
129       LOG.debug("Region directory (" + regionDir + ") was empty, just deleting and returning!");
130       return deleteRegionWithoutArchiving(fs, regionDir);
131     }
132 
133     // convert the files in the region to a File
134     toArchive.addAll(Lists.transform(Arrays.asList(storeDirs), getAsFile));
135     LOG.debug("Archiving:" + toArchive);
136     boolean success = false;
137     try {
138       success = resolveAndArchive(fs, regionArchiveDir, toArchive);
139     } catch (IOException e) {
140       LOG.error("Failed to archive: " + toArchive, e);
141       success = false;
142     }
143 
144     // if that was successful, then we delete the region
145     if (success) {
146       LOG.debug("Successfully resolved and archived, now can just delete region.");
147       return deleteRegionWithoutArchiving(fs, regionDir);
148     }
149 
150     throw new IOException("Received error when attempting to archive files (" + toArchive
151         + "), cannot delete region directory.");
152   }
153 
154   /**
155    * Remove from the specified region the store files of the specified column family,
156    * either by archiving them or outright deletion
157    * @param fs the filesystem where the store files live
158    * @param conf {@link Configuration} to examine to determine the archive directory
159    * @param parent Parent region hosting the store files
160    * @param tableDir {@link Path} to where the table is being stored (for building the archive path)
161    * @param family the family hosting the store files
162    * @throws IOException if the files could not be correctly disposed.
163    */
164   public static void archiveFamily(FileSystem fs, Configuration conf,
165       HRegionInfo parent, Path tableDir, byte[] family) throws IOException {
166     Path familyDir = new Path(tableDir, new Path(parent.getEncodedName(), Bytes.toString(family)));
167     FileStatus[] storeFiles = FSUtils.listStatus(fs, familyDir, null);
168     if (storeFiles == null) {
169       LOG.debug("No store files to dispose for region=" + parent.getRegionNameAsString() +
170           ", family=" + Bytes.toString(family));
171       return;
172     }
173 
174     FileStatusConverter getAsFile = new FileStatusConverter(fs);
175     Collection<File> toArchive = Lists.transform(Arrays.asList(storeFiles), getAsFile);
176     Path storeArchiveDir = HFileArchiveUtil.getStoreArchivePath(conf, parent, tableDir, family);
177 
178     // do the actual archive
179     if (!resolveAndArchive(fs, storeArchiveDir, toArchive)) {
180       throw new IOException("Failed to archive/delete all the files for region:"
181           + Bytes.toString(parent.getRegionName()) + ", family:" + Bytes.toString(family)
182           + " into " + storeArchiveDir + ". Something is probably awry on the filesystem.");
183     }
184   }
185 
186   /**
187    * Remove the store files, either by archiving them or outright deletion
188    * @param conf {@link Configuration} to examine to determine the archive directory
189    * @param fs the filesystem where the store files live
190    * @param parent Parent region hosting the store files
191    * @param family the family hosting the store files
192    * @param compactedFiles files to be disposed of. No further reading of these files should be
193    *          attempted; otherwise likely to cause an {@link IOException}
194    * @throws IOException if the files could not be correctly disposed.
195    */
196   public static void archiveStoreFiles(Configuration conf, FileSystem fs, HRegion parent,
197       byte[] family, Collection<StoreFile> compactedFiles) throws IOException {
198 
199     // sometimes in testing, we don't have rss, so we need to check for that
200     if (fs == null) {
201       LOG.warn("Passed filesystem is null, so just deleting the files without archiving for region:"
202           + Bytes.toString(parent.getRegionName()) + ", family:" + Bytes.toString(family));
203       deleteStoreFilesWithoutArchiving(compactedFiles);
204       return;
205     }
206 
207     // short circuit if we don't have any files to delete
208     if (compactedFiles.size() == 0) {
209       LOG.debug("No store files to dispose, done!");
210       return;
211     }
212 
213     // build the archive path
214     if (parent == null || family == null) throw new IOException(
215         "Need to have a parent region and a family to archive from.");
216 
217     Path storeArchiveDir = HFileArchiveUtil.getStoreArchivePath(conf, parent, family);
218 
219     // make sure we don't archive if we can't and that the archive dir exists
220     if (!HBaseFileSystem.makeDirOnFileSystem(fs, storeArchiveDir)) {
221       throw new IOException("Could not make archive directory (" + storeArchiveDir + ") for store:"
222           + Bytes.toString(family) + ", deleting compacted files instead.");
223     }
224 
225     // otherwise we attempt to archive the store files
226     LOG.debug("Archiving compacted store files.");
227 
228     // wrap the storefile into a File
229     StoreToFile getStorePath = new StoreToFile(fs);
230     Collection<File> storeFiles = Collections2.transform(compactedFiles, getStorePath);
231 
232     // do the actual archive
233     if (!resolveAndArchive(fs, storeArchiveDir, storeFiles)) {
234       throw new IOException("Failed to archive/delete all the files for region:"
235           + Bytes.toString(parent.getRegionName()) + ", family:" + Bytes.toString(family)
236           + " into " + storeArchiveDir + ". Something is probably awry on the filesystem.");
237     }
238   }
239 
240   /**
241    * Archive the store file
242    * @param fs the filesystem where the store files live
243    * @param regionInfo region hosting the store files
244    * @param conf {@link Configuration} to examine to determine the archive directory
245    * @param tableDir {@link Path} to where the table is being stored (for building the archive path)
246    * @param family the family hosting the store files
247    * @param storeFile file to be archived
248    * @throws IOException if the files could not be correctly disposed.
249    */
250   public static void archiveStoreFile(FileSystem fs, HRegionInfo regionInfo,
251       Configuration conf, Path tableDir, byte[] family, Path storeFile) throws IOException {
252     Path storeArchiveDir = HFileArchiveUtil.getStoreArchivePath(conf, regionInfo, tableDir, family);
253     // make sure we don't archive if we can't and that the archive dir exists
254     if (!HBaseFileSystem.makeDirOnFileSystem(fs, storeArchiveDir)) {
255       throw new IOException("Could not make archive directory (" + storeArchiveDir + ") for store:"
256           + Bytes.toString(family) + ", deleting compacted files instead.");
257     }
258 
259     // do the actual archive
260     long start = EnvironmentEdgeManager.currentTimeMillis();
261     File file = new FileablePath(fs, storeFile);
262     if (!resolveAndArchiveFile(storeArchiveDir, file, Long.toString(start))) {
263       throw new IOException("Failed to archive/delete the file for region:"
264           + regionInfo.getRegionNameAsString() + ", family:" + Bytes.toString(family)
265           + " into " + storeArchiveDir + ". Something is probably awry on the filesystem.");
266     }
267   }
268 
269   /**
270    * Archive the given files and resolve any conflicts with existing files via appending the time
271    * archiving started (so all conflicts in the same group have the same timestamp appended).
272    * <p>
273    * If any of the passed files to archive are directories, archives the all files under that
274    * directory. Archive directory structure for children is the base archive directory name + the
275    * parent directory and is built recursively is passed files are directories themselves.
276    * @param fs {@link FileSystem} on which to archive the files
277    * @param baseArchiveDir base archive directory to archive the given files
278    * @param toArchive files to be archived
279    * @return <tt>true</tt> on success, <tt>false</tt> otherwise
280    * @throws IOException on unexpected failure
281    */
282   private static boolean resolveAndArchive(FileSystem fs, Path baseArchiveDir,
283       Collection<File> toArchive) throws IOException {
284     LOG.debug("Starting to archive files:" + toArchive);
285     long start = EnvironmentEdgeManager.currentTimeMillis();
286     List<File> failures = resolveAndArchive(fs, baseArchiveDir, toArchive, start);
287 
288     // notify that some files were not archived.
289     // We can't delete the files otherwise snapshots or other backup system
290     // that relies on the archiver end up with data loss.
291     if (failures.size() > 0) {
292       LOG.warn("Failed to complete archive of: " + failures +
293         ". Those files are still in the original location, and they may slow down reads.");
294       return false;
295     }
296     return true;
297   }
298 
299   /**
300    * Resolve any conflict with an existing archive file via timestamp-append
301    * renaming of the existing file and then archive the passed in files.
302    * @param fs {@link FileSystem} on which to archive the files
303    * @param baseArchiveDir base archive directory to store the files. If any of
304    *          the files to archive are directories, will append the name of the
305    *          directory to the base archive directory name, creating a parallel
306    *          structure.
307    * @param toArchive files/directories that need to be archvied
308    * @param start time the archiving started - used for resolving archive
309    *          conflicts.
310    * @return the list of failed to archive files.
311    * @throws IOException if an unexpected file operation exception occured
312    */
313   private static List<File> resolveAndArchive(FileSystem fs, Path baseArchiveDir,
314       Collection<File> toArchive, long start) throws IOException {
315     // short circuit if no files to move
316     if (toArchive.size() == 0) return Collections.emptyList();
317 
318     LOG.debug("moving files to the archive directory: " + baseArchiveDir);
319 
320     // make sure the archive directory exists
321     if (!fs.exists(baseArchiveDir)) {
322       if (!HBaseFileSystem.makeDirOnFileSystem(fs, baseArchiveDir)) {
323         throw new IOException("Failed to create the archive directory:" + baseArchiveDir
324             + ", quitting archive attempt.");
325       }
326       LOG.debug("Created archive directory:" + baseArchiveDir);
327     }
328 
329     List<File> failures = new ArrayList<File>();
330     String startTime = Long.toString(start);
331     for (File file : toArchive) {
332       // if its a file archive it
333       try {
334         LOG.debug("Archiving:" + file);
335         if (file.isFile()) {
336           // attempt to archive the file
337           if (!resolveAndArchiveFile(baseArchiveDir, file, startTime)) {
338             LOG.warn("Couldn't archive " + file + " into backup directory: " + baseArchiveDir);
339             failures.add(file);
340           }
341         } else {
342           // otherwise its a directory and we need to archive all files
343           LOG.debug(file + " is a directory, archiving children files");
344           // so we add the directory name to the one base archive
345           Path parentArchiveDir = new Path(baseArchiveDir, file.getName());
346           // and then get all the files from that directory and attempt to
347           // archive those too
348           Collection<File> children = file.getChildren();
349           failures.addAll(resolveAndArchive(fs, parentArchiveDir, children, start));
350         }
351       } catch (IOException e) {
352         LOG.warn("Failed to archive file: " + file, e);
353         failures.add(file);
354       }
355     }
356     return failures;
357   }
358 
359   /**
360    * Attempt to archive the passed in file to the archive directory.
361    * <p>
362    * If the same file already exists in the archive, it is moved to a timestamped directory under
363    * the archive directory and the new file is put in its place.
364    * @param archiveDir {@link Path} to the directory that stores the archives of the hfiles
365    * @param currentFile {@link Path} to the original HFile that will be archived
366    * @param archiveStartTime time the archiving started, to resolve naming conflicts
367    * @return <tt>true</tt> if the file is successfully archived. <tt>false</tt> if there was a
368    *         problem, but the operation still completed.
369    * @throws IOException on failure to complete {@link FileSystem} operations.
370    */
371   private static boolean resolveAndArchiveFile(Path archiveDir, File currentFile,
372       String archiveStartTime) throws IOException {
373     // build path as it should be in the archive
374     String filename = currentFile.getName();
375     Path archiveFile = new Path(archiveDir, filename);
376     FileSystem fs = currentFile.getFileSystem();
377 
378     // if the file already exists in the archive, move that one to a timestamped backup. This is a
379     // really, really unlikely situtation, where we get the same name for the existing file, but
380     // is included just for that 1 in trillion chance.
381     if (fs.exists(archiveFile)) {
382       if (LOG.isDebugEnabled()) {
383         LOG.debug("File:" + archiveFile + " already exists in archive, moving to "
384             + "timestamped backup and overwriting current.");
385       }
386 
387       // move the archive file to the stamped backup
388       Path backedupArchiveFile = new Path(archiveDir, filename + SEPARATOR + archiveStartTime);
389       if (!HBaseFileSystem.renameAndSetModifyTime(fs, archiveFile, backedupArchiveFile)) {
390         LOG.error("Could not rename archive file to backup: " + backedupArchiveFile
391             + ", deleting existing file in favor of newer.");
392         // try to delete the exisiting file, if we can't rename it
393         if (!HBaseFileSystem.deleteFileFromFileSystem(fs, archiveFile)) {
394           throw new IOException("Couldn't delete existing archive file (" + archiveFile
395               + ") or rename it to the backup file (" + backedupArchiveFile
396               + ") to make room for similarly named file.");
397         }
398       }
399       LOG.debug("Backed up archive file from: " + archiveFile);
400     }
401 
402     LOG.debug("No existing file in archive for:" + archiveFile +
403         ", free to archive original file.");
404 
405     // at this point, we should have a free spot for the archive file
406     boolean success = false;
407     for (int i = 0; !success && i < DEFAULT_RETRIES_NUMBER; ++i) {
408       if (i > 0) {
409         // Ensure that the archive directory exists.
410         // The previous "move to archive" operation has failed probably because
411         // the cleaner has removed our archive directory (HBASE-7643).
412         // (we're in a retry loop, so don't worry too much about the exception)
413         try {
414           if (!fs.exists(archiveDir)
415               && HBaseFileSystem.makeDirOnFileSystem(fs, archiveDir)) {
416             LOG.debug("Created archive directory:" + archiveDir);
417           }
418         } catch (IOException e) {
419           LOG.warn("Failed to create the archive directory: " + archiveDir, e);
420         }
421       }
422 
423       try {
424         success = currentFile.moveAndClose(archiveFile);
425       } catch (IOException e) {
426         LOG.warn("Failed to archive file: " + currentFile + " on try #" + i, e);
427         success = false;
428       }
429     }
430 
431     if (!success) {
432       LOG.error("Failed to archive file:" + currentFile);
433       return false;
434     }
435 
436     if (LOG.isDebugEnabled()) {
437       LOG.debug("Finished archiving file from: " + currentFile + ", to: " + archiveFile);
438     }
439     return true;
440   }
441 
442   /**
443    * Simple delete of regular files from the {@link FileSystem}.
444    * <p>
445    * This method is a more generic implementation that the other deleteXXX
446    * methods in this class, allowing more code reuse at the cost of a couple
447    * more, short-lived objects (which should have minimum impact on the jvm).
448    * @param fs {@link FileSystem} where the files live
449    * @param files {@link Collection} of files to be deleted
450    * @throws IOException if a file cannot be deleted. All files will be
451    *           attempted to deleted before throwing the exception, rather than
452    *           failing at the first file.
453    */
454   private static void deleteFilesWithoutArchiving(Collection<File> files) throws IOException {
455     List<IOException> errors = new ArrayList<IOException>(0);
456     for (File file : files) {
457       try {
458         LOG.debug("Deleting region file:" + file);
459         file.delete();
460       } catch (IOException e) {
461         LOG.error("Failed to delete file:" + file);
462         errors.add(e);
463       }
464     }
465     if (errors.size() > 0) {
466       throw MultipleIOException.createIOException(errors);
467     }
468   }
469 
470   /**
471    * Without regard for backup, delete a region. Should be used with caution.
472    * @param regionDir {@link Path} to the region to be deleted.
473    * @param fs FileSystem from which to delete the region
474    * @return <tt>true</tt> on successful deletion, <tt>false</tt> otherwise
475    * @throws IOException on filesystem operation failure
476    */
477   private static boolean deleteRegionWithoutArchiving(FileSystem fs, Path regionDir)
478       throws IOException {
479     if (HBaseFileSystem.deleteDirFromFileSystem(fs, regionDir)) {
480       LOG.debug("Deleted all region files in: " + regionDir);
481       return true;
482     }
483     LOG.debug("Failed to delete region directory:" + regionDir);
484     return false;
485   }
486 
487   /**
488    * Just do a simple delete of the given store files
489    * <p>
490    * A best effort is made to delete each of the files, rather than bailing on the first failure.
491    * <p>
492    * This method is preferable to {@link #deleteFilesWithoutArchiving(Collection)} since it consumes
493    * less resources, but is limited in terms of usefulness
494    * @param compactedFiles store files to delete from the file system.
495    * @throws IOException if a file cannot be deleted. All files will be attempted to deleted before
496    *           throwing the exception, rather than failing at the first file.
497    */
498   private static void deleteStoreFilesWithoutArchiving(Collection<StoreFile> compactedFiles)
499       throws IOException {
500     LOG.debug("Deleting store files without archiving.");
501     List<IOException> errors = new ArrayList<IOException>(0);
502     for (StoreFile hsf : compactedFiles) {
503       try {
504         hsf.deleteReader();
505       } catch (IOException e) {
506         LOG.error("Failed to delete store file:" + hsf.getPath());
507         errors.add(e);
508       }
509     }
510     if (errors.size() > 0) {
511       throw MultipleIOException.createIOException(errors);
512     }
513   }
514 
  /**
   * Adapt a type to match the {@link File} interface, which is used internally for handling
   * archival/removal of files
   * @param <T> type to adapt to the {@link File} interface
   */
  private static abstract class FileConverter<T> implements Function<T, File> {
    // FileSystem on which the converted Files live; shared with subclasses.
    protected final FileSystem fs;

    public FileConverter(FileSystem fs) {
      this.fs = fs;
    }
  }
527 
528   /**
529    * Convert a FileStatus to something we can manage in the archiving
530    */
531   private static class FileStatusConverter extends FileConverter<FileStatus> {
532     public FileStatusConverter(FileSystem fs) {
533       super(fs);
534     }
535 
536     @Override
537     public File apply(FileStatus input) {
538       return new FileablePath(fs, input.getPath());
539     }
540   }
541 
542   /**
543    * Convert the {@link StoreFile} into something we can manage in the archive
544    * methods
545    */
546   private static class StoreToFile extends FileConverter<StoreFile> {
547     public StoreToFile(FileSystem fs) {
548       super(fs);
549     }
550 
551     @Override
552     public File apply(StoreFile input) {
553       return new FileableStoreFile(fs, input);
554     }
555   }
556 
  /**
   * Wrapper to handle file operations uniformly
   */
  private static abstract class File {
    // FileSystem on which this file lives; used for move/rename operations.
    protected final FileSystem fs;

    public File(FileSystem fs) {
      this.fs = fs;
    }

    /**
     * Delete the file
     * @throws IOException on failure
     */
    abstract void delete() throws IOException;

    /**
     * Check to see if this is a file or a directory
     * @return <tt>true</tt> if it is a file, <tt>false</tt> otherwise
     * @throws IOException on {@link FileSystem} connection error
     */
    abstract boolean isFile() throws IOException;

    /**
     * @return if this is a directory, returns all the children in the
     *         directory, otherwise returns an empty list
     * @throws IOException
     */
    abstract Collection<File> getChildren() throws IOException;

    /**
     * close any outside readers of the file
     * @throws IOException
     */
    abstract void close() throws IOException;

    /**
     * @return the name of the file (not the full fs path, just the individual
     *         file name)
     */
    abstract String getName();

    /**
     * @return the path to this file
     */
    abstract Path getPath();

    /**
     * Move the file to the given destination
     * @param dest destination path for the move
     * @return <tt>true</tt> on success
     * @throws IOException
     */
    public boolean moveAndClose(Path dest) throws IOException {
      // close readers first so the underlying rename is safe
      this.close();
      Path p = this.getPath();
      return HBaseFileSystem.renameAndSetModifyTime(fs, p, dest);
    }

    /**
     * @return the {@link FileSystem} on which this file resides
     */
    public FileSystem getFileSystem() {
      return this.fs;
    }

    @Override
    public String toString() {
      return this.getClass() + ", file:" + getPath().toString();
    }
  }
628 
629   /**
630    * A {@link File} that wraps a simple {@link Path} on a {@link FileSystem}.
631    */
632   private static class FileablePath extends File {
633     private final Path file;
634     private final FileStatusConverter getAsFile;
635 
636     public FileablePath(FileSystem fs, Path file) {
637       super(fs);
638       this.file = file;
639       this.getAsFile = new FileStatusConverter(fs);
640     }
641 
642     @Override
643     public void delete() throws IOException {
644       if (!HBaseFileSystem.deleteDirFromFileSystem(fs, file)) 
645         throw new IOException("Failed to delete:" + this.file);
646     }
647 
648     @Override
649     public String getName() {
650       return file.getName();
651     }
652 
653     @Override
654     public Collection<File> getChildren() throws IOException {
655       if (fs.isFile(file)) return Collections.emptyList();
656       return Collections2.transform(Arrays.asList(fs.listStatus(file)), getAsFile);
657     }
658 
659     @Override
660     public boolean isFile() throws IOException {
661       return fs.isFile(file);
662     }
663 
664     @Override
665     public void close() throws IOException {
666       // NOOP - files are implicitly closed on removal
667     }
668 
669     @Override
670     Path getPath() {
671       return file;
672     }
673   }
674 
675   /**
676    * {@link File} adapter for a {@link StoreFile} living on a {@link FileSystem}
677    * .
678    */
679   private static class FileableStoreFile extends File {
680     StoreFile file;
681 
682     public FileableStoreFile(FileSystem fs, StoreFile store) {
683       super(fs);
684       this.file = store;
685     }
686 
687     @Override
688     public void delete() throws IOException {
689       file.deleteReader();
690     }
691 
692     @Override
693     public String getName() {
694       return file.getPath().getName();
695     }
696 
697     @Override
698     public boolean isFile() {
699       return true;
700     }
701 
702     @Override
703     public Collection<File> getChildren() throws IOException {
704       // storefiles don't have children
705       return Collections.emptyList();
706     }
707 
708     @Override
709     public void close() throws IOException {
710       file.closeReader(true);
711     }
712 
713     @Override
714     Path getPath() {
715       return file.getPath();
716     }
717   }
718 }