/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.backup;

import java.io.IOException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collection;
import java.util.Collections;
import java.util.List;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.PathFilter;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.regionserver.HRegion;
import org.apache.hadoop.hbase.regionserver.StoreFile;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
import org.apache.hadoop.hbase.util.FSUtils;
import org.apache.hadoop.hbase.util.HFileArchiveUtil;
import org.apache.hadoop.io.MultipleIOException;

import com.google.common.base.Function;
import com.google.common.base.Preconditions;
import com.google.common.collect.Collections2;
import com.google.common.collect.Lists;
/**
 * Utility class to handle the removal of HFiles (or the respective {@link StoreFile StoreFiles})
 * for an HRegion from the {@link FileSystem}. The hfiles will be archived or deleted, depending on
 * the state of the system.
 */
public class HFileArchiver {
  private static final Log LOG = LogFactory.getLog(HFileArchiver.class);
  private static final String SEPARATOR = ".";

  /** Number of retries in case of fs operation failure */
  private static final int DEFAULT_RETRIES_NUMBER = 3;

  private HFileArchiver() {
    // hidden ctor since this is just a util
  }

  /**
   * Cleans up all the files for an HRegion by archiving the HFiles to the
   * archive directory
   * @param conf the configuration to use
   * @param fs the file system object
   * @param info HRegionInfo for region to be deleted
   * @throws IOException
   */
  public static void archiveRegion(Configuration conf, FileSystem fs, HRegionInfo info)
      throws IOException {
    Path rootDir = FSUtils.getRootDir(conf);
    archiveRegion(fs, rootDir, FSUtils.getTableDir(rootDir, info.getTable()),
      HRegion.getRegionDir(rootDir, info));
  }
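
  // Illustrative usage only (not part of the original class): a minimal sketch of how a caller
  // that already holds a Configuration, FileSystem and HRegionInfo might invoke the archiver.
  // The conf, fs and regionInfo variables are assumed to be supplied by the caller.
  //
  //   Configuration conf = HBaseConfiguration.create();
  //   FileSystem fs = FileSystem.get(conf);
  //   HRegionInfo regionInfo = ...; // region that has been split/merged/deleted
  //   HFileArchiver.archiveRegion(conf, fs, regionInfo);
  //
  // On success the region's hfiles end up under the archive directory computed by
  // HFileArchiveUtil; on failure an IOException is thrown and the region directory is left intact.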

  /**
   * Remove an entire region from the table directory via archiving the region's hfiles.
   * @param fs {@link FileSystem} from which to remove the region
   * @param rootdir {@link Path} to the root directory where hbase files are stored (for building
   *          the archive path)
   * @param tableDir {@link Path} to where the table is being stored (for building the archive path)
   * @param regionDir {@link Path} to where a region is being stored (for building the archive path)
   * @return <tt>true</tt> if the region was successfully deleted. <tt>false</tt> if the filesystem
   *         operations could not complete.
   * @throws IOException if the request cannot be completed
   */
  public static boolean archiveRegion(FileSystem fs, Path rootdir, Path tableDir, Path regionDir)
      throws IOException {
    if (LOG.isDebugEnabled()) {
      LOG.debug("ARCHIVING " + regionDir.toString());
    }

    // make sure we can archive
    if (tableDir == null || regionDir == null) {
      LOG.error("No archive directory could be found because tabledir (" + tableDir
          + ") or regiondir (" + regionDir + ") was null. Deleting files instead.");
      deleteRegionWithoutArchiving(fs, regionDir);
      // we should have archived, but failed to. It doesn't matter whether the delete of the
      // original files succeeded or not; the archive attempt itself failed.
      return false;
    }

    // make sure the regiondir lives under the tabledir
    Preconditions.checkArgument(regionDir.toString().startsWith(tableDir.toString()));
    Path regionArchiveDir = HFileArchiveUtil.getRegionArchiveDir(rootdir,
        FSUtils.getTableName(tableDir),
        regionDir.getName());

    FileStatusConverter getAsFile = new FileStatusConverter(fs);
    // otherwise, we attempt to archive the store files

    // build collection of just the store directories to archive
    Collection<File> toArchive = new ArrayList<File>();
    final PathFilter dirFilter = new FSUtils.DirFilter(fs);
    PathFilter nonHidden = new PathFilter() {
      @Override
      public boolean accept(Path file) {
        return dirFilter.accept(file) && !file.getName().toString().startsWith(".");
      }
    };
    FileStatus[] storeDirs = FSUtils.listStatus(fs, regionDir, nonHidden);
    // if there are no files, we can just delete the directory and return
    if (storeDirs == null) {
      LOG.debug("Region directory (" + regionDir + ") was empty, just deleting and returning!");
      return deleteRegionWithoutArchiving(fs, regionDir);
    }

    // convert the files in the region to a File
    toArchive.addAll(Lists.transform(Arrays.asList(storeDirs), getAsFile));
    LOG.debug("Archiving " + toArchive);
    boolean success = false;
    try {
      success = resolveAndArchive(fs, regionArchiveDir, toArchive);
    } catch (IOException e) {
      LOG.error("Failed to archive " + toArchive, e);
      success = false;
    }

    // if that was successful, then we delete the region
    if (success) {
      return deleteRegionWithoutArchiving(fs, regionDir);
    }

    throw new IOException("Received error when attempting to archive files (" + toArchive
        + "), cannot delete region directory. ");
  }

  /**
   * Remove from the specified region the store files of the specified column family,
   * either by archiving them or outright deletion
   * @param fs the filesystem where the store files live
   * @param conf {@link Configuration} to examine to determine the archive directory
   * @param parent Parent region hosting the store files
   * @param tableDir {@link Path} to where the table is being stored (for building the archive path)
   * @param family the family hosting the store files
   * @throws IOException if the files could not be correctly disposed.
   */
  public static void archiveFamily(FileSystem fs, Configuration conf,
      HRegionInfo parent, Path tableDir, byte[] family) throws IOException {
    Path familyDir = new Path(tableDir, new Path(parent.getEncodedName(), Bytes.toString(family)));
    FileStatus[] storeFiles = FSUtils.listStatus(fs, familyDir);
    if (storeFiles == null) {
      LOG.debug("No store files to dispose for region=" + parent.getRegionNameAsString() +
          ", family=" + Bytes.toString(family));
      return;
    }

    FileStatusConverter getAsFile = new FileStatusConverter(fs);
    Collection<File> toArchive = Lists.transform(Arrays.asList(storeFiles), getAsFile);
    Path storeArchiveDir = HFileArchiveUtil.getStoreArchivePath(conf, parent, tableDir, family);

    // do the actual archive
    if (!resolveAndArchive(fs, storeArchiveDir, toArchive)) {
      throw new IOException("Failed to archive/delete all the files for region:"
          + Bytes.toString(parent.getRegionName()) + ", family:" + Bytes.toString(family)
          + " into " + storeArchiveDir + ". Something is probably awry on the filesystem.");
    }
  }

  /**
   * Remove the store files, either by archiving them or outright deletion
   * @param conf {@link Configuration} to examine to determine the archive directory
   * @param fs the filesystem where the store files live
   * @param regionInfo {@link HRegionInfo} of the region hosting the store files
   * @param tableDir {@link Path} to where the table is being stored (for building the archive path)
   * @param family the family hosting the store files
   * @param compactedFiles files to be disposed of. No further reading of these files should be
   *          attempted; otherwise it is likely to cause an {@link IOException}
   * @throws IOException if the files could not be correctly disposed.
   */
  public static void archiveStoreFiles(Configuration conf, FileSystem fs, HRegionInfo regionInfo,
      Path tableDir, byte[] family, Collection<StoreFile> compactedFiles) throws IOException {

    // sometimes in testing the filesystem is not set (no region server), so check for that
    if (fs == null) {
      LOG.warn("Passed filesystem is null, so just deleting the files without archiving for region:"
          + Bytes.toString(regionInfo.getRegionName()) + ", family:" + Bytes.toString(family));
      deleteStoreFilesWithoutArchiving(compactedFiles);
      return;
    }

    // short circuit if we don't have any files to delete
    if (compactedFiles.size() == 0) {
      LOG.debug("No store files to dispose, done!");
      return;
    }

    // build the archive path
    if (regionInfo == null || family == null) throw new IOException(
        "Need to have a region and a family to archive from.");

    Path storeArchiveDir = HFileArchiveUtil.getStoreArchivePath(conf, regionInfo, tableDir, family);

    // make sure we don't archive if we can't and that the archive dir exists
    if (!fs.mkdirs(storeArchiveDir)) {
      throw new IOException("Could not make archive directory (" + storeArchiveDir + ") for store:"
          + Bytes.toString(family) + ", deleting compacted files instead.");
    }

    // otherwise we attempt to archive the store files
    if (LOG.isDebugEnabled()) LOG.debug("Archiving compacted store files.");

    // Wrap the storefile into a File
    StoreToFile getStorePath = new StoreToFile(fs);
    Collection<File> storeFiles = Collections2.transform(compactedFiles, getStorePath);

    // do the actual archive
    if (!resolveAndArchive(fs, storeArchiveDir, storeFiles)) {
      throw new IOException("Failed to archive/delete all the files for region:"
          + Bytes.toString(regionInfo.getRegionName()) + ", family:" + Bytes.toString(family)
          + " into " + storeArchiveDir + ". Something is probably awry on the filesystem.");
    }
  }
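
  // Illustrative usage only (not part of the original class): a hypothetical post-compaction
  // cleanup sketch. regionInfo, tableDir, family and compactedFiles are assumed to come from the
  // store that just finished compacting; conf and fs from the hosting region server.
  //
  //   Collection<StoreFile> compactedFiles = ...; // files replaced by the compaction
  //   HFileArchiver.archiveStoreFiles(conf, fs, regionInfo, tableDir, family, compactedFiles);
  //
  // If any file cannot be moved into the store's archive directory, an IOException is thrown and
  // the remaining files are left in place, so snapshots/backups referencing them are not broken.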

  /**
   * Archive the store file
   * @param fs the filesystem where the store files live
   * @param regionInfo region hosting the store files
   * @param conf {@link Configuration} to examine to determine the archive directory
   * @param tableDir {@link Path} to where the table is being stored (for building the archive path)
   * @param family the family hosting the store files
   * @param storeFile file to be archived
   * @throws IOException if the files could not be correctly disposed.
   */
  public static void archiveStoreFile(Configuration conf, FileSystem fs, HRegionInfo regionInfo,
      Path tableDir, byte[] family, Path storeFile) throws IOException {
    Path storeArchiveDir = HFileArchiveUtil.getStoreArchivePath(conf, regionInfo, tableDir, family);
    // make sure we don't archive if we can't and that the archive dir exists
    if (!fs.mkdirs(storeArchiveDir)) {
      throw new IOException("Could not make archive directory (" + storeArchiveDir + ") for store:"
          + Bytes.toString(family) + ", deleting compacted files instead.");
    }

    // do the actual archive
    long start = EnvironmentEdgeManager.currentTime();
    File file = new FileablePath(fs, storeFile);
    if (!resolveAndArchiveFile(storeArchiveDir, file, Long.toString(start))) {
      throw new IOException("Failed to archive/delete the file for region:"
          + regionInfo.getRegionNameAsString() + ", family:" + Bytes.toString(family)
          + " into " + storeArchiveDir + ". Something is probably awry on the filesystem.");
    }
  }
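
  // Illustrative usage only (not part of the original class): archiving a single hfile by path,
  // e.g. from a tool that has already located an orphaned store file. The storeFilePath value
  // below is a hypothetical example path.
  //
  //   Path storeFilePath = new Path(tableDir, regionInfo.getEncodedName() + "/f1/1a2b3c4d");
  //   HFileArchiver.archiveStoreFile(conf, fs, regionInfo, tableDir, Bytes.toBytes("f1"), storeFilePath);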

  /**
   * Archive the given files and resolve any conflicts with existing files via appending the time
   * archiving started (so all conflicts in the same group have the same timestamp appended).
   * <p>
   * If any of the passed files to archive are directories, archives all the files under that
   * directory. The archive directory structure for the children is the base archive directory name
   * + the parent directory, and is built recursively if the passed files are themselves
   * directories.
   * @param fs {@link FileSystem} on which to archive the files
   * @param baseArchiveDir base archive directory to archive the given files
   * @param toArchive files to be archived
   * @return <tt>true</tt> on success, <tt>false</tt> otherwise
   * @throws IOException on unexpected failure
   */
  private static boolean resolveAndArchive(FileSystem fs, Path baseArchiveDir,
      Collection<File> toArchive) throws IOException {
    if (LOG.isTraceEnabled()) LOG.trace("Starting to archive " + toArchive);
    long start = EnvironmentEdgeManager.currentTime();
    List<File> failures = resolveAndArchive(fs, baseArchiveDir, toArchive, start);

    // notify that some files were not archived.
    // We can't delete the files, otherwise snapshots or other backup systems
    // that rely on the archiver could end up with data loss.
    if (failures.size() > 0) {
      LOG.warn("Failed to complete archive of: " + failures +
        ". Those files are still in the original location, and they may slow down reads.");
      return false;
    }
    return true;
  }

  /**
   * Resolve any conflict with an existing archive file via timestamp-append
   * renaming of the existing file and then archive the passed in files.
   * @param fs {@link FileSystem} on which to archive the files
   * @param baseArchiveDir base archive directory to store the files. If any of
   *          the files to archive are directories, will append the name of the
   *          directory to the base archive directory name, creating a parallel
   *          structure.
   * @param toArchive files/directories that need to be archived
   * @param start time the archiving started - used for resolving archive
   *          conflicts.
   * @return the list of files that failed to be archived.
   * @throws IOException if an unexpected file operation exception occurred
   */
  private static List<File> resolveAndArchive(FileSystem fs, Path baseArchiveDir,
      Collection<File> toArchive, long start) throws IOException {
    // short circuit if no files to move
    if (toArchive.size() == 0) return Collections.emptyList();

    if (LOG.isTraceEnabled()) LOG.trace("moving files to the archive directory: " + baseArchiveDir);

    // make sure the archive directory exists
    if (!fs.exists(baseArchiveDir)) {
      if (!fs.mkdirs(baseArchiveDir)) {
        throw new IOException("Failed to create the archive directory:" + baseArchiveDir
            + ", quitting archive attempt.");
      }
      if (LOG.isTraceEnabled()) LOG.trace("Created archive directory:" + baseArchiveDir);
    }

    List<File> failures = new ArrayList<File>();
    String startTime = Long.toString(start);
    for (File file : toArchive) {
      // if it's a file, archive it
      try {
        if (LOG.isTraceEnabled()) LOG.trace("Archiving: " + file);
        if (file.isFile()) {
          // attempt to archive the file
          if (!resolveAndArchiveFile(baseArchiveDir, file, startTime)) {
            LOG.warn("Couldn't archive " + file + " into backup directory: " + baseArchiveDir);
            failures.add(file);
          }
        } else {
          // otherwise it's a directory and we need to archive all its files
          if (LOG.isTraceEnabled()) LOG.trace(file + " is a directory, archiving children files");
          // so we add the directory name to the one base archive
          Path parentArchiveDir = new Path(baseArchiveDir, file.getName());
          // and then get all the files from that directory and attempt to
          // archive those too
          Collection<File> children = file.getChildren();
          failures.addAll(resolveAndArchive(fs, parentArchiveDir, children, start));
        }
      } catch (IOException e) {
        LOG.warn("Failed to archive " + file, e);
        failures.add(file);
      }
    }
    return failures;
  }

  /**
   * Attempt to archive the passed in file to the archive directory.
   * <p>
   * If the same file already exists in the archive, it is first renamed to a timestamped backup
   * file (the archive start time is appended to its name) and the new file is put in its place.
   * @param archiveDir {@link Path} to the directory that stores the archives of the hfiles
   * @param currentFile {@link Path} to the original HFile that will be archived
   * @param archiveStartTime time the archiving started, to resolve naming conflicts
   * @return <tt>true</tt> if the file is successfully archived. <tt>false</tt> if there was a
   *         problem, but the operation still completed.
   * @throws IOException on failure to complete {@link FileSystem} operations.
   */
  private static boolean resolveAndArchiveFile(Path archiveDir, File currentFile,
      String archiveStartTime) throws IOException {
    // build path as it should be in the archive
    String filename = currentFile.getName();
    Path archiveFile = new Path(archiveDir, filename);
    FileSystem fs = currentFile.getFileSystem();

    // if the file already exists in the archive, move that one to a timestamped backup. This is a
    // really, really unlikely situation, where we get the same name for the existing file, but
    // it is handled just for that one-in-a-trillion chance.
    if (fs.exists(archiveFile)) {
      if (LOG.isDebugEnabled()) {
        LOG.debug("File:" + archiveFile + " already exists in archive, moving to "
            + "timestamped backup and overwriting current.");
      }

      // move the archive file to the stamped backup
      Path backedupArchiveFile = new Path(archiveDir, filename + SEPARATOR + archiveStartTime);
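      // Illustrative example (hypothetical names): with SEPARATOR "." and an archive start time of
      // 1400000000000, an existing archive entry "1a2b3c4d" is renamed to "1a2b3c4d.1400000000000"
      // before the newly archived file takes its place.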
      if (!fs.rename(archiveFile, backedupArchiveFile)) {
        LOG.error("Could not rename archive file to backup: " + backedupArchiveFile
            + ", deleting existing file in favor of newer.");
        // try to delete the existing file, if we can't rename it
        if (!fs.delete(archiveFile, false)) {
          throw new IOException("Couldn't delete existing archive file (" + archiveFile
              + ") or rename it to the backup file (" + backedupArchiveFile
              + ") to make room for similarly named file.");
        }
      }
      LOG.debug("Backed up archive file from " + archiveFile);
    }

    if (LOG.isTraceEnabled()) {
      LOG.trace("No existing file in archive for: " + archiveFile +
        ", free to archive original file.");
    }

    // at this point, we should have a free spot for the archive file
    boolean success = false;
    for (int i = 0; !success && i < DEFAULT_RETRIES_NUMBER; ++i) {
      if (i > 0) {
        // Ensure that the archive directory exists.
        // The previous "move to archive" operation has probably failed because
        // the cleaner has removed our archive directory (HBASE-7643).
        // (we're in a retry loop, so don't worry too much about the exception)
        try {
          if (!fs.exists(archiveDir)) {
            if (fs.mkdirs(archiveDir)) {
              LOG.debug("Created archive directory:" + archiveDir);
            }
          }
        } catch (IOException e) {
          LOG.warn("Failed to create directory: " + archiveDir, e);
        }
      }

      try {
        success = currentFile.moveAndClose(archiveFile);
      } catch (IOException e) {
        LOG.warn("Failed to archive " + currentFile + " on try #" + i, e);
        success = false;
      }
    }

    if (!success) {
      LOG.error("Failed to archive " + currentFile);
      return false;
    }

    if (LOG.isDebugEnabled()) {
      LOG.debug("Finished archiving from " + currentFile + ", to " + archiveFile);
    }
    return true;
  }

  /**
   * Without regard for backup, delete a region. Should be used with caution.
   * @param regionDir {@link Path} to the region to be deleted.
   * @param fs FileSystem from which to delete the region
   * @return <tt>true</tt> on successful deletion, <tt>false</tt> otherwise
   * @throws IOException on filesystem operation failure
   */
  private static boolean deleteRegionWithoutArchiving(FileSystem fs, Path regionDir)
      throws IOException {
    if (fs.delete(regionDir, true)) {
      LOG.debug("Deleted all region files in: " + regionDir);
      return true;
    }
    LOG.debug("Failed to delete region directory:" + regionDir);
    return false;
  }

  /**
   * Just do a simple delete of the given store files
   * <p>
   * A best effort is made to delete each of the files, rather than bailing on the first failure.
   * <p>
   * This method is preferable to {@link #deleteFilesWithoutArchiving(Collection)} since it
   * consumes less resources, but is limited in terms of usefulness
   * @param compactedFiles store files to delete from the file system.
   * @throws IOException if a file cannot be deleted. An attempt is made to delete all the files
   *           before the exception is thrown, rather than failing at the first file.
   */
  private static void deleteStoreFilesWithoutArchiving(Collection<StoreFile> compactedFiles)
      throws IOException {
    LOG.debug("Deleting store files without archiving.");
    List<IOException> errors = new ArrayList<IOException>(0);
    for (StoreFile hsf : compactedFiles) {
      try {
        hsf.deleteReader();
      } catch (IOException e) {
        LOG.error("Failed to delete store file:" + hsf.getPath());
        errors.add(e);
      }
    }
    if (errors.size() > 0) {
      throw MultipleIOException.createIOException(errors);
    }
  }

  /**
   * Adapt a type to match the {@link File} interface, which is used internally for handling
   * archival/removal of files
   * @param <T> type to adapt to the {@link File} interface
   */
  private static abstract class FileConverter<T> implements Function<T, File> {
    protected final FileSystem fs;

    public FileConverter(FileSystem fs) {
      this.fs = fs;
    }
  }

  /**
   * Convert a FileStatus to something we can manage in the archiving
   */
  private static class FileStatusConverter extends FileConverter<FileStatus> {
    public FileStatusConverter(FileSystem fs) {
      super(fs);
    }

    @Override
    public File apply(FileStatus input) {
      return new FileablePath(fs, input.getPath());
    }
  }

  /**
   * Convert the {@link StoreFile} into something we can manage in the archive
   * methods
   */
  private static class StoreToFile extends FileConverter<StoreFile> {
    public StoreToFile(FileSystem fs) {
      super(fs);
    }

    @Override
    public File apply(StoreFile input) {
      return new FileableStoreFile(fs, input);
    }
  }

  /**
   * Wrapper to handle file operations uniformly
   */
  private static abstract class File {
    protected final FileSystem fs;

    public File(FileSystem fs) {
      this.fs = fs;
    }

    /**
     * Delete the file
     * @throws IOException on failure
     */
    abstract void delete() throws IOException;

    /**
     * Check to see if this is a file or a directory
     * @return <tt>true</tt> if it is a file, <tt>false</tt> otherwise
     * @throws IOException on {@link FileSystem} connection error
     */
    abstract boolean isFile() throws IOException;

    /**
     * @return if this is a directory, all the children in the directory,
     *         otherwise an empty list
     * @throws IOException
     */
    abstract Collection<File> getChildren() throws IOException;

    /**
     * Close any outside readers of the file
     * @throws IOException
     */
    abstract void close() throws IOException;

    /**
     * @return the name of the file (not the full fs path, just the individual
     *         file name)
     */
    abstract String getName();

    /**
     * @return the path to this file
     */
    abstract Path getPath();

    /**
     * Move the file to the given destination, closing it first.
     * @param dest destination to which the file should be moved
     * @return <tt>true</tt> on success
     * @throws IOException
     */
    public boolean moveAndClose(Path dest) throws IOException {
      this.close();
      Path p = this.getPath();
      return FSUtils.renameAndSetModifyTime(fs, p, dest);
    }

    /**
     * @return the {@link FileSystem} on which this file resides
     */
    public FileSystem getFileSystem() {
      return this.fs;
    }

    @Override
    public String toString() {
      return this.getClass() + ", file:" + getPath().toString();
    }
  }

  /**
   * A {@link File} that wraps a simple {@link Path} on a {@link FileSystem}.
   */
  private static class FileablePath extends File {
    private final Path file;
    private final FileStatusConverter getAsFile;

    public FileablePath(FileSystem fs, Path file) {
      super(fs);
      this.file = file;
      this.getAsFile = new FileStatusConverter(fs);
    }

    @Override
    public void delete() throws IOException {
      if (!fs.delete(file, true)) throw new IOException("Failed to delete:" + this.file);
    }

    @Override
    public String getName() {
      return file.getName();
    }

    @Override
    public Collection<File> getChildren() throws IOException {
      if (fs.isFile(file)) return Collections.emptyList();
      return Collections2.transform(Arrays.asList(fs.listStatus(file)), getAsFile);
    }

    @Override
    public boolean isFile() throws IOException {
      return fs.isFile(file);
    }

    @Override
    public void close() throws IOException {
      // NOOP - files are implicitly closed on removal
    }

    @Override
    Path getPath() {
      return file;
    }
  }

  /**
   * {@link File} adapter for a {@link StoreFile} living on a {@link FileSystem}.
   */
  private static class FileableStoreFile extends File {
    StoreFile file;

    public FileableStoreFile(FileSystem fs, StoreFile store) {
      super(fs);
      this.file = store;
    }

    @Override
    public void delete() throws IOException {
      file.deleteReader();
    }

    @Override
    public String getName() {
      return file.getPath().getName();
    }

    @Override
    public boolean isFile() {
      return true;
    }

    @Override
    public Collection<File> getChildren() throws IOException {
      // storefiles don't have children
      return Collections.emptyList();
    }

    @Override
    public void close() throws IOException {
      file.closeReader(true);
    }

    @Override
    Path getPath() {
      return file.getPath();
    }
  }
}