001/** 002 * Licensed to the Apache Software Foundation (ASF) under one 003 * or more contributor license agreements. See the NOTICE file 004 * distributed with this work for additional information 005 * regarding copyright ownership. The ASF licenses this file 006 * to you under the Apache License, Version 2.0 (the 007 * "License"); you may not use this file except in compliance 008 * with the License. You may obtain a copy of the License at 009 * 010 * http://www.apache.org/licenses/LICENSE-2.0 011 * 012 * Unless required by applicable law or agreed to in writing, software 013 * distributed under the License is distributed on an "AS IS" BASIS, 014 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 015 * See the License for the specific language governing permissions and 016 * limitations under the License. 017 */ 018package org.apache.hadoop.hbase.backup; 019 020import java.io.FileNotFoundException; 021import java.io.IOException; 022import java.util.ArrayList; 023import java.util.Arrays; 024import java.util.Collection; 025import java.util.Collections; 026import java.util.List; 027 028import org.apache.hadoop.conf.Configuration; 029import org.apache.hadoop.fs.FileStatus; 030import org.apache.hadoop.fs.FileSystem; 031import org.apache.hadoop.fs.Path; 032import org.apache.hadoop.fs.PathFilter; 033import org.apache.hadoop.hbase.client.RegionInfo; 034import org.apache.hadoop.hbase.regionserver.HStoreFile; 035import org.apache.hadoop.hbase.util.Bytes; 036import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; 037import org.apache.hadoop.hbase.util.FSUtils; 038import org.apache.hadoop.hbase.util.HFileArchiveUtil; 039import org.apache.hadoop.io.MultipleIOException; 040import org.apache.yetus.audience.InterfaceAudience; 041import org.slf4j.Logger; 042import org.slf4j.LoggerFactory; 043import org.apache.hbase.thirdparty.com.google.common.base.Function; 044import org.apache.hbase.thirdparty.com.google.common.base.Preconditions; 045import 
org.apache.hbase.thirdparty.com.google.common.collect.Collections2;
import org.apache.hbase.thirdparty.com.google.common.collect.Lists;

/**
 * Utility class to handle the removal of HFiles (or the respective {@link HStoreFile StoreFiles})
 * for a HRegion from the {@link FileSystem}. The hfiles will be archived or deleted, depending on
 * the state of the system.
 */
@InterfaceAudience.Private
public class HFileArchiver {
  private static final Logger LOG = LoggerFactory.getLogger(HFileArchiver.class);

  // Separator placed between an archived file's name and the conflict-resolution timestamp
  // when an identically named file already exists in the archive.
  private static final String SEPARATOR = ".";

  /** Number of retries in case of fs operation failure */
  private static final int DEFAULT_RETRIES_NUMBER = 3;

  // Extracts the Path from a File wrapper; maps a null File to a null Path.
  private static final Function<File, Path> FUNC_FILE_TO_PATH =
      new Function<File, Path>() {
        @Override
        public Path apply(File file) {
          return file == null ? null : file.getPath();
        }
      };

  private HFileArchiver() {
    // hidden ctor since this is just a util
  }

  /**
   * Checks whether the region's directory exists under the HBase root directory.
   * @param conf configuration used to locate the HBase root directory
   * @param fs the filesystem to check
   * @param info region to look for
   * @return True if the Region exists in the filesystem.
   * @throws IOException if the filesystem cannot be queried
   */
  public static boolean exists(Configuration conf, FileSystem fs, RegionInfo info)
      throws IOException {
    Path rootDir = FSUtils.getRootDir(conf);
    Path regionDir = FSUtils.getRegionDirFromRootDir(rootDir, info);
    return fs.exists(regionDir);
  }

  /**
   * Cleans up all the files for a HRegion by archiving the HFiles to the archive directory
   * @param conf the configuration to use
   * @param fs the file system object
   * @param info RegionInfo for region to be deleted
   * @throws IOException if the archiving (or the fallback deletion) fails
   */
  public static void archiveRegion(Configuration conf, FileSystem fs, RegionInfo info)
      throws IOException {
    Path rootDir = FSUtils.getRootDir(conf);
    archiveRegion(fs, rootDir, FSUtils.getTableDir(rootDir, info.getTable()),
      FSUtils.getRegionDirFromRootDir(rootDir, info));
  }

  /**
   * Remove an entire region from the table directory via archiving the region's hfiles.
098 * @param fs {@link FileSystem} from which to remove the region 099 * @param rootdir {@link Path} to the root directory where hbase files are stored (for building 100 * the archive path) 101 * @param tableDir {@link Path} to where the table is being stored (for building the archive path) 102 * @param regionDir {@link Path} to where a region is being stored (for building the archive path) 103 * @return <tt>true</tt> if the region was sucessfully deleted. <tt>false</tt> if the filesystem 104 * operations could not complete. 105 * @throws IOException if the request cannot be completed 106 */ 107 public static boolean archiveRegion(FileSystem fs, Path rootdir, Path tableDir, Path regionDir) 108 throws IOException { 109 LOG.debug("ARCHIVING {}", rootdir.toString()); 110 111 // otherwise, we archive the files 112 // make sure we can archive 113 if (tableDir == null || regionDir == null) { 114 LOG.error("No archive directory could be found because tabledir (" + tableDir 115 + ") or regiondir (" + regionDir + "was null. Deleting files instead."); 116 deleteRegionWithoutArchiving(fs, regionDir); 117 // we should have archived, but failed to. Doesn't matter if we deleted 118 // the archived files correctly or not. 
119 return false; 120 } 121 122 // make sure the regiondir lives under the tabledir 123 Preconditions.checkArgument(regionDir.toString().startsWith(tableDir.toString())); 124 Path regionArchiveDir = HFileArchiveUtil.getRegionArchiveDir(rootdir, 125 FSUtils.getTableName(tableDir), 126 regionDir.getName()); 127 128 FileStatusConverter getAsFile = new FileStatusConverter(fs); 129 // otherwise, we attempt to archive the store files 130 131 // build collection of just the store directories to archive 132 Collection<File> toArchive = new ArrayList<>(); 133 final PathFilter dirFilter = new FSUtils.DirFilter(fs); 134 PathFilter nonHidden = new PathFilter() { 135 @Override 136 public boolean accept(Path file) { 137 return dirFilter.accept(file) && !file.getName().toString().startsWith("."); 138 } 139 }; 140 FileStatus[] storeDirs = FSUtils.listStatus(fs, regionDir, nonHidden); 141 // if there no files, we can just delete the directory and return; 142 if (storeDirs == null) { 143 LOG.debug("Directory {} empty.", regionDir); 144 return deleteRegionWithoutArchiving(fs, regionDir); 145 } 146 147 // convert the files in the region to a File 148 toArchive.addAll(Lists.transform(Arrays.asList(storeDirs), getAsFile)); 149 LOG.debug("Archiving " + toArchive); 150 List<File> failedArchive = resolveAndArchive(fs, regionArchiveDir, toArchive, 151 EnvironmentEdgeManager.currentTime()); 152 if (!failedArchive.isEmpty()) { 153 throw new FailedArchiveException("Failed to archive/delete all the files for region:" 154 + regionDir.getName() + " into " + regionArchiveDir 155 + ". 
Something is probably awry on the filesystem.", 156 Collections2.transform(failedArchive, FUNC_FILE_TO_PATH)); 157 } 158 // if that was successful, then we delete the region 159 return deleteRegionWithoutArchiving(fs, regionDir); 160 } 161 162 /** 163 * Remove from the specified region the store files of the specified column family, 164 * either by archiving them or outright deletion 165 * @param fs the filesystem where the store files live 166 * @param conf {@link Configuration} to examine to determine the archive directory 167 * @param parent Parent region hosting the store files 168 * @param tableDir {@link Path} to where the table is being stored (for building the archive path) 169 * @param family the family hosting the store files 170 * @throws IOException if the files could not be correctly disposed. 171 */ 172 public static void archiveFamily(FileSystem fs, Configuration conf, 173 RegionInfo parent, Path tableDir, byte[] family) throws IOException { 174 Path familyDir = new Path(tableDir, new Path(parent.getEncodedName(), Bytes.toString(family))); 175 archiveFamilyByFamilyDir(fs, conf, parent, familyDir, family); 176 } 177 178 /** 179 * Removes from the specified region the store files of the specified column family, 180 * either by archiving them or outright deletion 181 * @param fs the filesystem where the store files live 182 * @param conf {@link Configuration} to examine to determine the archive directory 183 * @param parent Parent region hosting the store files 184 * @param familyDir {@link Path} to where the family is being stored 185 * @param family the family hosting the store files 186 * @throws IOException if the files could not be correctly disposed. 
187 */ 188 public static void archiveFamilyByFamilyDir(FileSystem fs, Configuration conf, 189 RegionInfo parent, Path familyDir, byte[] family) throws IOException { 190 FileStatus[] storeFiles = FSUtils.listStatus(fs, familyDir); 191 if (storeFiles == null) { 192 LOG.debug("No files to dispose of in {}, family={}", parent.getRegionNameAsString(), 193 Bytes.toString(family)); 194 return; 195 } 196 197 FileStatusConverter getAsFile = new FileStatusConverter(fs); 198 Collection<File> toArchive = Lists.transform(Arrays.asList(storeFiles), getAsFile); 199 Path storeArchiveDir = HFileArchiveUtil.getStoreArchivePath(conf, parent, family); 200 201 // do the actual archive 202 List<File> failedArchive = resolveAndArchive(fs, storeArchiveDir, toArchive, 203 EnvironmentEdgeManager.currentTime()); 204 if (!failedArchive.isEmpty()){ 205 throw new FailedArchiveException("Failed to archive/delete all the files for region:" 206 + Bytes.toString(parent.getRegionName()) + ", family:" + Bytes.toString(family) 207 + " into " + storeArchiveDir + ". Something is probably awry on the filesystem.", 208 Collections2.transform(failedArchive, FUNC_FILE_TO_PATH)); 209 } 210 } 211 212 /** 213 * Remove the store files, either by archiving them or outright deletion 214 * @param conf {@link Configuration} to examine to determine the archive directory 215 * @param fs the filesystem where the store files live 216 * @param regionInfo {@link RegionInfo} of the region hosting the store files 217 * @param family the family hosting the store files 218 * @param compactedFiles files to be disposed of. No further reading of these files should be 219 * attempted; otherwise likely to cause an {@link IOException} 220 * @throws IOException if the files could not be correctly disposed. 
221 */ 222 public static void archiveStoreFiles(Configuration conf, FileSystem fs, RegionInfo regionInfo, 223 Path tableDir, byte[] family, Collection<HStoreFile> compactedFiles) 224 throws IOException, FailedArchiveException { 225 226 // sometimes in testing, we don't have rss, so we need to check for that 227 if (fs == null) { 228 LOG.warn("Passed filesystem is null, so just deleting files without archiving for {}," + 229 "family={}", Bytes.toString(regionInfo.getRegionName()), Bytes.toString(family)); 230 deleteStoreFilesWithoutArchiving(compactedFiles); 231 return; 232 } 233 234 // short circuit if we don't have any files to delete 235 if (compactedFiles.isEmpty()) { 236 LOG.debug("No files to dispose of, done!"); 237 return; 238 } 239 240 // build the archive path 241 if (regionInfo == null || family == null) throw new IOException( 242 "Need to have a region and a family to archive from."); 243 244 Path storeArchiveDir = HFileArchiveUtil.getStoreArchivePath(conf, regionInfo, tableDir, family); 245 246 // make sure we don't archive if we can't and that the archive dir exists 247 if (!fs.mkdirs(storeArchiveDir)) { 248 throw new IOException("Could not make archive directory (" + storeArchiveDir + ") for store:" 249 + Bytes.toString(family) + ", deleting compacted files instead."); 250 } 251 252 // otherwise we attempt to archive the store files 253 LOG.debug("Archiving compacted files."); 254 255 // Wrap the storefile into a File 256 StoreToFile getStorePath = new StoreToFile(fs); 257 Collection<File> storeFiles = Collections2.transform(compactedFiles, getStorePath); 258 259 // do the actual archive 260 List<File> failedArchive = resolveAndArchive(fs, storeArchiveDir, storeFiles, 261 EnvironmentEdgeManager.currentTime()); 262 263 if (!failedArchive.isEmpty()){ 264 throw new FailedArchiveException("Failed to archive/delete all the files for region:" 265 + Bytes.toString(regionInfo.getRegionName()) + ", family:" + Bytes.toString(family) 266 + " into " + 
storeArchiveDir + ". Something is probably awry on the filesystem.", 267 Collections2.transform(failedArchive, FUNC_FILE_TO_PATH)); 268 } 269 } 270 271 /** 272 * Archive the store file 273 * @param fs the filesystem where the store files live 274 * @param regionInfo region hosting the store files 275 * @param conf {@link Configuration} to examine to determine the archive directory 276 * @param tableDir {@link Path} to where the table is being stored (for building the archive path) 277 * @param family the family hosting the store files 278 * @param storeFile file to be archived 279 * @throws IOException if the files could not be correctly disposed. 280 */ 281 public static void archiveStoreFile(Configuration conf, FileSystem fs, RegionInfo regionInfo, 282 Path tableDir, byte[] family, Path storeFile) throws IOException { 283 Path storeArchiveDir = HFileArchiveUtil.getStoreArchivePath(conf, regionInfo, tableDir, family); 284 // make sure we don't archive if we can't and that the archive dir exists 285 if (!fs.mkdirs(storeArchiveDir)) { 286 throw new IOException("Could not make archive directory (" + storeArchiveDir + ") for store:" 287 + Bytes.toString(family) + ", deleting compacted files instead."); 288 } 289 290 // do the actual archive 291 long start = EnvironmentEdgeManager.currentTime(); 292 File file = new FileablePath(fs, storeFile); 293 if (!resolveAndArchiveFile(storeArchiveDir, file, Long.toString(start))) { 294 throw new IOException("Failed to archive/delete the file for region:" 295 + regionInfo.getRegionNameAsString() + ", family:" + Bytes.toString(family) 296 + " into " + storeArchiveDir + ". Something is probably awry on the filesystem."); 297 } 298 } 299 300 /** 301 * Resolve any conflict with an existing archive file via timestamp-append 302 * renaming of the existing file and then archive the passed in files. 303 * @param fs {@link FileSystem} on which to archive the files 304 * @param baseArchiveDir base archive directory to store the files. 
If any of 305 * the files to archive are directories, will append the name of the 306 * directory to the base archive directory name, creating a parallel 307 * structure. 308 * @param toArchive files/directories that need to be archvied 309 * @param start time the archiving started - used for resolving archive 310 * conflicts. 311 * @return the list of failed to archive files. 312 * @throws IOException if an unexpected file operation exception occurred 313 */ 314 private static List<File> resolveAndArchive(FileSystem fs, Path baseArchiveDir, 315 Collection<File> toArchive, long start) throws IOException { 316 // short circuit if no files to move 317 if (toArchive.isEmpty()) { 318 return Collections.emptyList(); 319 } 320 321 LOG.trace("Moving files to the archive directory {}", baseArchiveDir); 322 323 // make sure the archive directory exists 324 if (!fs.exists(baseArchiveDir)) { 325 if (!fs.mkdirs(baseArchiveDir)) { 326 throw new IOException("Failed to create the archive directory:" + baseArchiveDir 327 + ", quitting archive attempt."); 328 } 329 LOG.trace("Created archive directory {}", baseArchiveDir); 330 } 331 332 List<File> failures = new ArrayList<>(); 333 String startTime = Long.toString(start); 334 for (File file : toArchive) { 335 // if its a file archive it 336 try { 337 LOG.trace("Archiving {}", file); 338 if (file.isFile()) { 339 // attempt to archive the file 340 if (!resolveAndArchiveFile(baseArchiveDir, file, startTime)) { 341 LOG.warn("Couldn't archive " + file + " into backup directory: " + baseArchiveDir); 342 failures.add(file); 343 } 344 } else { 345 // otherwise its a directory and we need to archive all files 346 LOG.trace("{} is a directory, archiving children files", file); 347 // so we add the directory name to the one base archive 348 Path parentArchiveDir = new Path(baseArchiveDir, file.getName()); 349 // and then get all the files from that directory and attempt to 350 // archive those too 351 Collection<File> children = 
file.getChildren(); 352 failures.addAll(resolveAndArchive(fs, parentArchiveDir, children, start)); 353 } 354 } catch (IOException e) { 355 LOG.warn("Failed to archive {}", file, e); 356 failures.add(file); 357 } 358 } 359 return failures; 360 } 361 362 /** 363 * Attempt to archive the passed in file to the archive directory. 364 * <p> 365 * If the same file already exists in the archive, it is moved to a timestamped directory under 366 * the archive directory and the new file is put in its place. 367 * @param archiveDir {@link Path} to the directory that stores the archives of the hfiles 368 * @param currentFile {@link Path} to the original HFile that will be archived 369 * @param archiveStartTime time the archiving started, to resolve naming conflicts 370 * @return <tt>true</tt> if the file is successfully archived. <tt>false</tt> if there was a 371 * problem, but the operation still completed. 372 * @throws IOException on failure to complete {@link FileSystem} operations. 373 */ 374 private static boolean resolveAndArchiveFile(Path archiveDir, File currentFile, 375 String archiveStartTime) throws IOException { 376 // build path as it should be in the archive 377 String filename = currentFile.getName(); 378 Path archiveFile = new Path(archiveDir, filename); 379 FileSystem fs = currentFile.getFileSystem(); 380 381 // if the file already exists in the archive, move that one to a timestamped backup. This is a 382 // really, really unlikely situtation, where we get the same name for the existing file, but 383 // is included just for that 1 in trillion chance. 
384 if (fs.exists(archiveFile)) { 385 LOG.debug("{} already exists in archive, moving to timestamped backup and " + 386 "overwriting current.", archiveFile); 387 388 // move the archive file to the stamped backup 389 Path backedupArchiveFile = new Path(archiveDir, filename + SEPARATOR + archiveStartTime); 390 if (!fs.rename(archiveFile, backedupArchiveFile)) { 391 LOG.error("Could not rename archive file to backup: " + backedupArchiveFile 392 + ", deleting existing file in favor of newer."); 393 // try to delete the exisiting file, if we can't rename it 394 if (!fs.delete(archiveFile, false)) { 395 throw new IOException("Couldn't delete existing archive file (" + archiveFile 396 + ") or rename it to the backup file (" + backedupArchiveFile 397 + ") to make room for similarly named file."); 398 } 399 } 400 LOG.debug("Backed up archive file from " + archiveFile); 401 } 402 403 LOG.trace("No existing file in archive for {}, free to archive original file.", archiveFile); 404 405 // at this point, we should have a free spot for the archive file 406 boolean success = false; 407 for (int i = 0; !success && i < DEFAULT_RETRIES_NUMBER; ++i) { 408 if (i > 0) { 409 // Ensure that the archive directory exists. 410 // The previous "move to archive" operation has failed probably because 411 // the cleaner has removed our archive directory (HBASE-7643). 412 // (we're in a retry loop, so don't worry too much about the exception) 413 try { 414 if (!fs.exists(archiveDir)) { 415 if (fs.mkdirs(archiveDir)) { 416 LOG.debug("Created archive directory {}", archiveDir); 417 } 418 } 419 } catch (IOException e) { 420 LOG.warn("Failed to create directory {}", archiveDir, e); 421 } 422 } 423 424 try { 425 success = currentFile.moveAndClose(archiveFile); 426 } catch (FileNotFoundException fnfe) { 427 LOG.warn("Failed to archive " + currentFile + 428 " because it does not exist! 
Skipping and continuing on.", fnfe); 429 success = true; 430 } catch (IOException e) { 431 LOG.warn("Failed to archive " + currentFile + " on try #" + i, e); 432 success = false; 433 } 434 } 435 436 if (!success) { 437 LOG.error("Failed to archive " + currentFile); 438 return false; 439 } 440 441 LOG.debug("Archived from {} to {}", currentFile, archiveFile); 442 return true; 443 } 444 445 /** 446 * Without regard for backup, delete a region. Should be used with caution. 447 * @param regionDir {@link Path} to the region to be deleted. 448 * @param fs FileSystem from which to delete the region 449 * @return <tt>true</tt> on successful deletion, <tt>false</tt> otherwise 450 * @throws IOException on filesystem operation failure 451 */ 452 private static boolean deleteRegionWithoutArchiving(FileSystem fs, Path regionDir) 453 throws IOException { 454 if (fs.delete(regionDir, true)) { 455 LOG.debug("Deleted {}", regionDir); 456 return true; 457 } 458 LOG.debug("Failed to delete directory {}", regionDir); 459 return false; 460 } 461 462 /** 463 * Just do a simple delete of the given store files 464 * <p> 465 * A best effort is made to delete each of the files, rather than bailing on the first failure. 466 * <p> 467 * @param compactedFiles store files to delete from the file system. 468 * @throws IOException if a file cannot be deleted. All files will be attempted to deleted before 469 * throwing the exception, rather than failing at the first file. 
470 */ 471 private static void deleteStoreFilesWithoutArchiving(Collection<HStoreFile> compactedFiles) 472 throws IOException { 473 LOG.debug("Deleting files without archiving."); 474 List<IOException> errors = new ArrayList<>(0); 475 for (HStoreFile hsf : compactedFiles) { 476 try { 477 hsf.deleteStoreFile(); 478 } catch (IOException e) { 479 LOG.error("Failed to delete {}", hsf.getPath()); 480 errors.add(e); 481 } 482 } 483 if (errors.size() > 0) { 484 throw MultipleIOException.createIOException(errors); 485 } 486 } 487 488 /** 489 * Adapt a type to match the {@link File} interface, which is used internally for handling 490 * archival/removal of files 491 * @param <T> type to adapt to the {@link File} interface 492 */ 493 private static abstract class FileConverter<T> implements Function<T, File> { 494 protected final FileSystem fs; 495 496 public FileConverter(FileSystem fs) { 497 this.fs = fs; 498 } 499 } 500 501 /** 502 * Convert a FileStatus to something we can manage in the archiving 503 */ 504 private static class FileStatusConverter extends FileConverter<FileStatus> { 505 public FileStatusConverter(FileSystem fs) { 506 super(fs); 507 } 508 509 @Override 510 public File apply(FileStatus input) { 511 return new FileablePath(fs, input.getPath()); 512 } 513 } 514 515 /** 516 * Convert the {@link HStoreFile} into something we can manage in the archive 517 * methods 518 */ 519 private static class StoreToFile extends FileConverter<HStoreFile> { 520 public StoreToFile(FileSystem fs) { 521 super(fs); 522 } 523 524 @Override 525 public File apply(HStoreFile input) { 526 return new FileableStoreFile(fs, input); 527 } 528 } 529 530 /** 531 * Wrapper to handle file operations uniformly 532 */ 533 private static abstract class File { 534 protected final FileSystem fs; 535 536 public File(FileSystem fs) { 537 this.fs = fs; 538 } 539 540 /** 541 * Delete the file 542 * @throws IOException on failure 543 */ 544 abstract void delete() throws IOException; 545 546 /** 547 * 
Check to see if this is a file or a directory 548 * @return <tt>true</tt> if it is a file, <tt>false</tt> otherwise 549 * @throws IOException on {@link FileSystem} connection error 550 */ 551 abstract boolean isFile() throws IOException; 552 553 /** 554 * @return if this is a directory, returns all the children in the 555 * directory, otherwise returns an empty list 556 * @throws IOException 557 */ 558 abstract Collection<File> getChildren() throws IOException; 559 560 /** 561 * close any outside readers of the file 562 * @throws IOException 563 */ 564 abstract void close() throws IOException; 565 566 /** 567 * @return the name of the file (not the full fs path, just the individual 568 * file name) 569 */ 570 abstract String getName(); 571 572 /** 573 * @return the path to this file 574 */ 575 abstract Path getPath(); 576 577 /** 578 * Move the file to the given destination 579 * @param dest 580 * @return <tt>true</tt> on success 581 * @throws IOException 582 */ 583 public boolean moveAndClose(Path dest) throws IOException { 584 this.close(); 585 Path p = this.getPath(); 586 return FSUtils.renameAndSetModifyTime(fs, p, dest); 587 } 588 589 /** 590 * @return the {@link FileSystem} on which this file resides 591 */ 592 public FileSystem getFileSystem() { 593 return this.fs; 594 } 595 596 @Override 597 public String toString() { 598 return this.getClass().getSimpleName() + ", " + getPath().toString(); 599 } 600 } 601 602 /** 603 * A {@link File} that wraps a simple {@link Path} on a {@link FileSystem}. 
604 */ 605 private static class FileablePath extends File { 606 private final Path file; 607 private final FileStatusConverter getAsFile; 608 609 public FileablePath(FileSystem fs, Path file) { 610 super(fs); 611 this.file = file; 612 this.getAsFile = new FileStatusConverter(fs); 613 } 614 615 @Override 616 public void delete() throws IOException { 617 if (!fs.delete(file, true)) throw new IOException("Failed to delete:" + this.file); 618 } 619 620 @Override 621 public String getName() { 622 return file.getName(); 623 } 624 625 @Override 626 public Collection<File> getChildren() throws IOException { 627 if (fs.isFile(file)) return Collections.emptyList(); 628 return Collections2.transform(Arrays.asList(fs.listStatus(file)), getAsFile); 629 } 630 631 @Override 632 public boolean isFile() throws IOException { 633 return fs.isFile(file); 634 } 635 636 @Override 637 public void close() throws IOException { 638 // NOOP - files are implicitly closed on removal 639 } 640 641 @Override 642 Path getPath() { 643 return file; 644 } 645 } 646 647 /** 648 * {@link File} adapter for a {@link HStoreFile} living on a {@link FileSystem} 649 * . 650 */ 651 private static class FileableStoreFile extends File { 652 HStoreFile file; 653 654 public FileableStoreFile(FileSystem fs, HStoreFile store) { 655 super(fs); 656 this.file = store; 657 } 658 659 @Override 660 public void delete() throws IOException { 661 file.deleteStoreFile(); 662 } 663 664 @Override 665 public String getName() { 666 return file.getPath().getName(); 667 } 668 669 @Override 670 public boolean isFile() { 671 return true; 672 } 673 674 @Override 675 public Collection<File> getChildren() throws IOException { 676 // storefiles don't have children 677 return Collections.emptyList(); 678 } 679 680 @Override 681 public void close() throws IOException { 682 file.closeStoreFile(true); 683 } 684 685 @Override 686 Path getPath() { 687 return file.getPath(); 688 } 689 } 690}