001/*
002 * Licensed to the Apache Software Foundation (ASF) under one
003 * or more contributor license agreements.  See the NOTICE file
004 * distributed with this work for additional information
005 * regarding copyright ownership.  The ASF licenses this file
006 * to you under the Apache License, Version 2.0 (the
007 * "License"); you may not use this file except in compliance
008 * with the License.  You may obtain a copy of the License at
009 *
010 *     http://www.apache.org/licenses/LICENSE-2.0
011 *
012 * Unless required by applicable law or agreed to in writing, software
013 * distributed under the License is distributed on an "AS IS" BASIS,
014 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
015 * See the License for the specific language governing permissions and
016 * limitations under the License.
017 */
018package org.apache.hadoop.hbase.regionserver;
019
020import static org.apache.hadoop.hbase.io.HFileLink.LINK_NAME_PATTERN;
021
022import edu.umd.cs.findbugs.annotations.Nullable;
023import java.io.FileNotFoundException;
024import java.io.IOException;
025import java.io.InterruptedIOException;
026import java.util.ArrayList;
027import java.util.Collection;
028import java.util.HashMap;
029import java.util.List;
030import java.util.Map;
031import java.util.Objects;
032import java.util.Optional;
033import java.util.UUID;
034import java.util.regex.Matcher;
035import org.apache.hadoop.conf.Configuration;
036import org.apache.hadoop.fs.FSDataInputStream;
037import org.apache.hadoop.fs.FSDataOutputStream;
038import org.apache.hadoop.fs.FileStatus;
039import org.apache.hadoop.fs.FileSystem;
040import org.apache.hadoop.fs.FileUtil;
041import org.apache.hadoop.fs.LocatedFileStatus;
042import org.apache.hadoop.fs.Path;
043import org.apache.hadoop.fs.permission.FsPermission;
044import org.apache.hadoop.hbase.Cell;
045import org.apache.hadoop.hbase.ExtendedCell;
046import org.apache.hadoop.hbase.HConstants;
047import org.apache.hadoop.hbase.PrivateCellUtil;
048import org.apache.hadoop.hbase.TableName;
049import org.apache.hadoop.hbase.backup.HFileArchiver;
050import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
051import org.apache.hadoop.hbase.client.RegionInfo;
052import org.apache.hadoop.hbase.client.TableDescriptor;
053import org.apache.hadoop.hbase.fs.HFileSystem;
054import org.apache.hadoop.hbase.io.HFileLink;
055import org.apache.hadoop.hbase.io.Reference;
056import org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv;
057import org.apache.hadoop.hbase.regionserver.storefiletracker.StoreFileTracker;
058import org.apache.hadoop.hbase.regionserver.storefiletracker.StoreFileTrackerFactory;
059import org.apache.hadoop.hbase.util.Bytes;
060import org.apache.hadoop.hbase.util.CommonFSUtils;
061import org.apache.hadoop.hbase.util.FSUtils;
062import org.apache.hadoop.hbase.util.Pair;
063import org.apache.hadoop.hbase.util.ServerRegionReplicaUtil;
064import org.apache.yetus.audience.InterfaceAudience;
065import org.slf4j.Logger;
066import org.slf4j.LoggerFactory;
067
068import org.apache.hbase.thirdparty.com.google.common.collect.Lists;
069
070/**
071 * View to an on-disk Region. Provides the set of methods necessary to interact with the on-disk
072 * region data.
073 */
074@InterfaceAudience.Private
075public class HRegionFileSystem {
  private static final Logger LOG = LoggerFactory.getLogger(HRegionFileSystem.class);

  /** Name of the region info file that resides just under the region directory. */
  public final static String REGION_INFO_FILE = ".regioninfo";

  /** Temporary subdirectory of the region directory used for merges. */
  public static final String REGION_MERGES_DIR = ".merges";

  /** Temporary subdirectory of the region directory used for splits. */
  public static final String REGION_SPLITS_DIR = ".splits";

  /** Temporary subdirectory of the region directory used for compaction output. */
  static final String REGION_TEMP_DIR = ".tmp";

  // RegionInfo this view was created for (resolved to regionInfoForFs for disk access).
  private final RegionInfo regionInfo;
  // regionInfo for interacting with FS (getting encodedName, etc)
  final RegionInfo regionInfoForFs;
  // Configuration used for filesystem interaction and retry settings.
  final Configuration conf;
  // Directory of the table that contains this region's directory.
  private final Path tableDir;
  // Filesystem holding the on-disk region data.
  final FileSystem fs;
  // Cached path to this region's directory under tableDir.
  private final Path regionDir;

  /**
   * In order to handle NN connectivity hiccups, one need to retry non-idempotent operation at the
   * client level.
   */
  private final int hdfsClientRetriesNumber;
  // Base sleep between retries; presumably milliseconds — TODO confirm against sleepBeforeRetry.
  private final int baseSleepBeforeRetries;
  private static final int DEFAULT_HDFS_CLIENT_RETRIES_NUMBER = 10;
  private static final int DEFAULT_BASE_SLEEP_BEFORE_RETRIES = 1000;
106
107  /**
108   * Create a view to the on-disk region
109   * @param conf       the {@link Configuration} to use
110   * @param fs         {@link FileSystem} that contains the region
111   * @param tableDir   {@link Path} to where the table is being stored
112   * @param regionInfo {@link RegionInfo} for region
113   */
114  HRegionFileSystem(final Configuration conf, final FileSystem fs, final Path tableDir,
115    final RegionInfo regionInfo) {
116    this.fs = fs;
117    this.conf = conf;
118    this.tableDir = Objects.requireNonNull(tableDir, "tableDir is null");
119    this.regionInfo = Objects.requireNonNull(regionInfo, "regionInfo is null");
120    this.regionInfoForFs = ServerRegionReplicaUtil.getRegionInfoForFs(regionInfo);
121    this.regionDir = FSUtils.getRegionDirFromTableDir(tableDir, regionInfo);
122    this.hdfsClientRetriesNumber =
123      conf.getInt("hdfs.client.retries.number", DEFAULT_HDFS_CLIENT_RETRIES_NUMBER);
124    this.baseSleepBeforeRetries =
125      conf.getInt("hdfs.client.sleep.before.retries", DEFAULT_BASE_SLEEP_BEFORE_RETRIES);
126  }
127
128  /** Returns the underlying {@link FileSystem} */
129  public FileSystem getFileSystem() {
130    return this.fs;
131  }
132
133  /** Returns the {@link RegionInfo} that describe this on-disk region view */
134  public RegionInfo getRegionInfo() {
135    return this.regionInfo;
136  }
137
138  public RegionInfo getRegionInfoForFS() {
139    return this.regionInfoForFs;
140  }
141
142  /** Returns {@link Path} to the region's root directory. */
143  public Path getTableDir() {
144    return this.tableDir;
145  }
146
147  /** Returns {@link Path} to the region directory. */
148  public Path getRegionDir() {
149    return regionDir;
150  }
151
152  // ===========================================================================
153  // Temp Helpers
154  // ===========================================================================
155  /** Returns {@link Path} to the region's temp directory, used for file creations */
156  public Path getTempDir() {
157    return new Path(getRegionDir(), REGION_TEMP_DIR);
158  }
159
160  /**
161   * Clean up any temp detritus that may have been left around from previous operation attempts.
162   */
163  void cleanupTempDir() throws IOException {
164    deleteDir(getTempDir());
165  }
166
167  // ===========================================================================
168  // Store/StoreFile Helpers
169  // ===========================================================================
170  /**
171   * Returns the directory path of the specified family
172   * @param familyName Column Family Name
173   * @return {@link Path} to the directory of the specified family
174   */
175  public Path getStoreDir(final String familyName) {
176    return new Path(this.getRegionDir(), familyName);
177  }
178
179  /**
180   * @param tabledir {@link Path} to where the table is being stored
181   * @param hri      {@link RegionInfo} for the region.
182   * @param family   {@link ColumnFamilyDescriptor} describing the column family
183   * @return Path to family/Store home directory.
184   */
185  public static Path getStoreHomedir(final Path tabledir, final RegionInfo hri,
186    final byte[] family) {
187    return getStoreHomedir(tabledir, hri.getEncodedName(), family);
188  }
189
190  /**
191   * @param tabledir    {@link Path} to where the table is being stored
192   * @param encodedName Encoded region name.
193   * @param family      {@link ColumnFamilyDescriptor} describing the column family
194   * @return Path to family/Store home directory.
195   */
196  public static Path getStoreHomedir(final Path tabledir, final String encodedName,
197    final byte[] family) {
198    return new Path(tabledir, new Path(encodedName, Bytes.toString(family)));
199  }
200
201  /**
202   * Create the store directory for the specified family name
203   * @param familyName Column Family Name
204   * @return {@link Path} to the directory of the specified family
205   * @throws IOException if the directory creation fails.
206   */
207  Path createStoreDir(final String familyName) throws IOException {
208    Path storeDir = getStoreDir(familyName);
209    if (!fs.exists(storeDir) && !createDir(storeDir))
210      throw new IOException("Failed creating " + storeDir);
211    return storeDir;
212  }
213
214  /**
215   * Set the directory of CF to the specified storage policy. <br>
216   * <i>"LAZY_PERSIST"</i>, <i>"ALL_SSD"</i>, <i>"ONE_SSD"</i>, <i>"HOT"</i>, <i>"WARM"</i>,
217   * <i>"COLD"</i> <br>
218   * <br>
219   * See {@link org.apache.hadoop.hdfs.protocol.HdfsConstants} for more details.
220   * @param familyName The name of column family.
221   * @param policyName The name of the storage policy: 'HOT', 'COLD', etc. See hadoop 2.6+
222   *                   org.apache.hadoop.hdfs.protocol.HdfsConstants for possible list e.g 'COLD',
223   *                   'WARM', 'HOT', 'ONE_SSD', 'ALL_SSD', 'LAZY_PERSIST'.
224   */
225  public void setStoragePolicy(String familyName, String policyName) {
226    CommonFSUtils.setStoragePolicy(this.fs, getStoreDir(familyName), policyName);
227  }
228
229  /**
230   * Set storage policy for a whole region. <br>
231   * <i>"LAZY_PERSIST"</i>, <i>"ALL_SSD"</i>, <i>"ONE_SSD"</i>, <i>"HOT"</i>, <i>"WARM"</i>,
232   * <i>"COLD"</i> <br>
233   * <br>
234   * See {@link org.apache.hadoop.hdfs.protocol.HdfsConstants} for more details.
235   * @param policyName The name of the storage policy: 'HOT', 'COLD', etc. See hadoop 2.6+
236   *                   org.apache.hadoop.hdfs.protocol.HdfsConstants for possible list e.g 'COLD',
237   *                   'WARM', 'HOT', 'ONE_SSD', 'ALL_SSD', 'LAZY_PERSIST'.
238   */
239  public void setStoragePolicy(String policyName) {
240    CommonFSUtils.setStoragePolicy(this.fs, getRegionDir(), policyName);
241  }
242
243  /**
244   * Get the storage policy of the directory of CF.
245   * @param familyName The name of column family.
246   * @return Storage policy name, or {@code null} if not using {@link HFileSystem} or exception
247   *         thrown when trying to get policy
248   */
249  @Nullable
250  public String getStoragePolicyName(String familyName) {
251    if (this.fs instanceof HFileSystem) {
252      Path storeDir = getStoreDir(familyName);
253      return ((HFileSystem) this.fs).getStoragePolicyName(storeDir);
254    }
255
256    return null;
257  }
258
259  /**
260   * Returns the store files' LocatedFileStatus which available for the family. This methods
261   * performs the filtering based on the valid store files.
262   * @param familyName Column Family Name
263   * @return a list of store files' LocatedFileStatus for the specified family.
264   */
265  public static List<LocatedFileStatus> getStoreFilesLocatedStatus(final HRegionFileSystem regionfs,
266    final String familyName, final boolean validate) throws IOException {
267    Path familyDir = regionfs.getStoreDir(familyName);
268    List<LocatedFileStatus> locatedFileStatuses =
269      CommonFSUtils.listLocatedStatus(regionfs.getFileSystem(), familyDir);
270    if (locatedFileStatuses == null) {
271      if (LOG.isTraceEnabled()) {
272        LOG.trace("No StoreFiles for: " + familyDir);
273      }
274      return null;
275    }
276
277    List<LocatedFileStatus> validStoreFiles = Lists.newArrayList();
278    for (LocatedFileStatus status : locatedFileStatuses) {
279      if (validate && !StoreFileInfo.isValid(status)) {
280        // recovered.hfiles directory is expected inside CF path when hbase.wal.split.to.hfile to
281        // true, refer HBASE-23740
282        if (!HConstants.RECOVERED_HFILES_DIR.equals(status.getPath().getName())) {
283          LOG.warn("Invalid StoreFile: {}", status.getPath());
284        }
285      } else {
286        validStoreFiles.add(status);
287      }
288    }
289    return validStoreFiles;
290  }
291
292  /**
293   * Return Qualified Path of the specified family/file
294   * @param familyName Column Family Name
295   * @param fileName   File Name
296   * @return The qualified Path for the specified family/file
297   */
298  Path getStoreFilePath(final String familyName, final String fileName) {
299    Path familyDir = getStoreDir(familyName);
300    return new Path(familyDir, fileName).makeQualified(fs.getUri(), fs.getWorkingDirectory());
301  }
302
303  /**
304   * Return the store file information of the specified family/file.
305   * @param familyName Column Family Name
306   * @param fileName   File Name
307   * @return The {@link StoreFileInfo} for the specified family/file
308   */
309  StoreFileInfo getStoreFileInfo(final String familyName, final String fileName,
310    final StoreFileTracker tracker) throws IOException {
311    Path familyDir = getStoreDir(familyName);
312    return ServerRegionReplicaUtil.getStoreFileInfo(conf, fs, regionInfo, regionInfoForFs,
313      familyName, new Path(familyDir, fileName), tracker);
314  }
315
316  /** Returns the set of families present on disk n */
317  public Collection<String> getFamilies() throws IOException {
318    FileStatus[] fds =
319      CommonFSUtils.listStatus(fs, getRegionDir(), new FSUtils.FamilyDirFilter(fs));
320    if (fds == null) return null;
321
322    ArrayList<String> families = new ArrayList<>(fds.length);
323    for (FileStatus status : fds) {
324      families.add(status.getPath().getName());
325    }
326
327    return families;
328  }
329
330  /**
331   * Remove the region family from disk, archiving the store files.
332   * @param familyName Column Family Name
333   * @throws IOException if an error occours during the archiving
334   */
335  public void deleteFamily(final String familyName) throws IOException {
336    // archive family store files
337    HFileArchiver.archiveFamily(fs, conf, regionInfoForFs, tableDir, Bytes.toBytes(familyName));
338
339    // delete the family folder
340    Path familyDir = getStoreDir(familyName);
341    if (fs.exists(familyDir) && !deleteDir(familyDir))
342      throw new IOException("Could not delete family " + familyName + " from FileSystem for region "
343        + regionInfoForFs.getRegionNameAsString() + "(" + regionInfoForFs.getEncodedName() + ")");
344  }
345
346  /**
347   * Generate a unique file name, used by createTempName() and commitStoreFile()
348   * @param suffix extra information to append to the generated name
349   * @return Unique file name
350   */
351  private static String generateUniqueName(final String suffix) {
352    String name = UUID.randomUUID().toString().replaceAll("-", "");
353    if (suffix != null) name += suffix;
354    return name;
355  }
356
357  /**
358   * Generate a unique temporary Path. Used in conjuction with commitStoreFile() to get a safer file
359   * creation. <code>
360   * Path file = fs.createTempName();
361   * ...StoreFile.Writer(file)...
362   * fs.commitStoreFile("family", file);
363   * </code>
364   * @return Unique {@link Path} of the temporary file
365   */
366  public Path createTempName() {
367    return createTempName(null);
368  }
369
370  /**
371   * Generate a unique temporary Path. Used in conjuction with commitStoreFile() to get a safer file
372   * creation. <code>
373   * Path file = fs.createTempName();
374   * ...StoreFile.Writer(file)...
375   * fs.commitStoreFile("family", file);
376   * </code>
377   * @param suffix extra information to append to the generated name
378   * @return Unique {@link Path} of the temporary file
379   */
380  public Path createTempName(final String suffix) {
381    return new Path(getTempDir(), generateUniqueName(suffix));
382  }
383
384  /**
385   * Move the file from a build/temp location to the main family store directory.
386   * @param familyName Family that will gain the file
387   * @param buildPath  {@link Path} to the file to commit.
388   * @return The new {@link Path} of the committed file
389   */
390  public Path commitStoreFile(final String familyName, final Path buildPath) throws IOException {
391    Path dstPath = preCommitStoreFile(familyName, buildPath, -1, false);
392    return commitStoreFile(buildPath, dstPath);
393  }
394
395  /**
396   * Generate the filename in the main family store directory for moving the file from a build/temp
397   * location.
398   * @param familyName      Family that will gain the file
399   * @param buildPath       {@link Path} to the file to commit.
400   * @param seqNum          Sequence Number to append to the file name (less then 0 if no sequence
401   *                        number)
402   * @param generateNewName False if you want to keep the buildPath name
403   * @return The new {@link Path} of the to be committed file
404   */
405  private Path preCommitStoreFile(final String familyName, final Path buildPath, final long seqNum,
406    final boolean generateNewName) throws IOException {
407    Path storeDir = getStoreDir(familyName);
408    if (!fs.exists(storeDir) && !createDir(storeDir))
409      throw new IOException("Failed creating " + storeDir);
410
411    String name = buildPath.getName();
412    if (generateNewName) {
413      name = generateUniqueName((seqNum < 0) ? null : StoreFileInfo.formatBulkloadSeqId(seqNum));
414    }
415    Path dstPath = new Path(storeDir, name);
416    if (!fs.exists(buildPath)) {
417      throw new FileNotFoundException(buildPath.toString());
418    }
419    if (LOG.isDebugEnabled()) {
420      LOG.debug("Committing " + buildPath + " as " + dstPath);
421    }
422    return dstPath;
423  }
424
425  /*
426   * Moves file from staging dir to region dir
427   * @param buildPath {@link Path} to the file to commit.
428   * @param dstPath {@link Path} to the file under region dir
429   * @return The {@link Path} of the committed file
430   */
431  Path commitStoreFile(final Path buildPath, Path dstPath) throws IOException {
432    // rename is not necessary in case of direct-insert stores
433    if (buildPath.equals(dstPath)) {
434      return dstPath;
435    }
436    // buildPath exists, therefore not doing an exists() check.
437    if (!rename(buildPath, dstPath)) {
438      throw new IOException("Failed rename of " + buildPath + " to " + dstPath);
439    }
440    return dstPath;
441  }
442
443  /**
444   * Bulk load: Add a specified store file to the specified family. If the source file is on the
445   * same different file-system is moved from the source location to the destination location,
446   * otherwise is copied over.
447   * @param familyName Family that will gain the file
448   * @param srcPath    {@link Path} to the file to import
449   * @param seqNum     Bulk Load sequence number
450   * @return The destination {@link Path} of the bulk loaded file
451   */
452  Pair<Path, Path> bulkLoadStoreFile(final String familyName, Path srcPath, long seqNum)
453    throws IOException {
454    // Copy the file if it's on another filesystem
455    FileSystem srcFs = srcPath.getFileSystem(conf);
456    srcPath = srcFs.resolvePath(srcPath);
457    FileSystem realSrcFs = srcPath.getFileSystem(conf);
458    FileSystem desFs = fs instanceof HFileSystem ? ((HFileSystem) fs).getBackingFs() : fs;
459
460    // We can't compare FileSystem instances as equals() includes UGI instance
461    // as part of the comparison and won't work when doing SecureBulkLoad
462    // TODO deal with viewFS
463    if (!FSUtils.isSameHdfs(conf, realSrcFs, desFs)) {
464      LOG.info("Bulk-load file " + srcPath + " is on different filesystem than "
465        + "the destination store. Copying file over to destination filesystem.");
466      Path tmpPath = createTempName();
467      FileUtil.copy(realSrcFs, srcPath, fs, tmpPath, false, conf);
468      LOG.info("Copied " + srcPath + " to temporary path on destination filesystem: " + tmpPath);
469      srcPath = tmpPath;
470    }
471
472    return new Pair<>(srcPath, preCommitStoreFile(familyName, srcPath, seqNum, true));
473  }
474
475  // ===========================================================================
476  // Splits Helpers
477  // ===========================================================================
478
479  public Path getSplitsDir(final RegionInfo hri) {
480    return new Path(getTableDir(), hri.getEncodedName());
481  }
482
483  /**
484   * Remove daughter region
485   * @param regionInfo daughter {@link RegionInfo}
486   */
487  void cleanupDaughterRegion(final RegionInfo regionInfo) throws IOException {
488    Path regionDir = new Path(this.tableDir, regionInfo.getEncodedName());
489    if (this.fs.exists(regionDir) && !deleteDir(regionDir)) {
490      throw new IOException("Failed delete of " + regionDir);
491    }
492  }
493
494  /**
495   * Commit a daughter region, moving it from the split temporary directory to the proper location
496   * in the filesystem.
497   * @param regionInfo daughter {@link org.apache.hadoop.hbase.client.RegionInfo}
498   */
499  public Path commitDaughterRegion(final RegionInfo regionInfo, List<StoreFileInfo> allRegionFiles,
500    MasterProcedureEnv env) throws IOException {
501    Path regionDir = this.getSplitsDir(regionInfo);
502    if (fs.exists(regionDir)) {
503      // Write HRI to a file in case we need to recover hbase:meta
504      Path regionInfoFile = new Path(regionDir, REGION_INFO_FILE);
505      byte[] regionInfoContent = getRegionInfoFileContent(regionInfo);
506      writeRegionInfoFileContent(conf, fs, regionInfoFile, regionInfoContent);
507      HRegionFileSystem regionFs = HRegionFileSystem.openRegionFromFileSystem(
508        env.getMasterConfiguration(), fs, getTableDir(), regionInfo, false);
509      insertRegionFilesIntoStoreTracker(allRegionFiles, env, regionFs);
510    }
511    return regionDir;
512  }
513
514  private void insertRegionFilesIntoStoreTracker(List<StoreFileInfo> allFiles,
515    MasterProcedureEnv env, HRegionFileSystem regionFs) throws IOException {
516    TableDescriptor tblDesc =
517      env.getMasterServices().getTableDescriptors().get(regionInfo.getTable());
518    // we need to map trackers per store
519    Map<String, StoreFileTracker> trackerMap = new HashMap<>();
520    // we need to map store files per store
521    Map<String, List<StoreFileInfo>> fileInfoMap = new HashMap<>();
522    for (StoreFileInfo sfi : allFiles) {
523      Path file = sfi.getPath();
524      String familyName = file.getParent().getName();
525      trackerMap.computeIfAbsent(familyName, t -> StoreFileTrackerFactory.create(conf, tblDesc,
526        tblDesc.getColumnFamily(Bytes.toBytes(familyName)), regionFs));
527      fileInfoMap.computeIfAbsent(familyName, l -> new ArrayList<>());
528      List<StoreFileInfo> infos = fileInfoMap.get(familyName);
529      infos.add(sfi);
530    }
531    for (Map.Entry<String, StoreFileTracker> entry : trackerMap.entrySet()) {
532      entry.getValue().add(fileInfoMap.get(entry.getKey()));
533    }
534  }
535
536  private void insertRegionfilePathsIntoStoreTracker(List<StoreFileInfo> allFiles,
537    MasterProcedureEnv env, HRegionFileSystem regionFs) throws IOException {
538    TableDescriptor tblDesc =
539      env.getMasterServices().getTableDescriptors().get(regionInfo.getTable());
540    // we need to map trackers per store
541    Map<String, StoreFileTracker> trackerMap = new HashMap<>();
542    // we need to map store files per store
543    Map<String, List<StoreFileInfo>> fileInfoMap = new HashMap<>();
544    for (StoreFileInfo file : allFiles) {
545      String familyName = file.getPath().getParent().getName();
546      trackerMap.computeIfAbsent(familyName, t -> StoreFileTrackerFactory.create(conf, tblDesc,
547        tblDesc.getColumnFamily(familyName.getBytes()), regionFs));
548      fileInfoMap.computeIfAbsent(familyName, l -> new ArrayList<>());
549      List<StoreFileInfo> infos = fileInfoMap.get(familyName);
550      infos.add(file);
551    }
552    for (Map.Entry<String, StoreFileTracker> entry : trackerMap.entrySet()) {
553      entry.getValue().add(fileInfoMap.get(entry.getKey()));
554    }
555  }
556
557  /**
558   * Creates region split daughter directories under the table dir. If the daughter regions already
559   * exist, for example, in the case of a recovery from a previous failed split procedure, this
560   * method deletes the given region dir recursively, then recreates it again.
561   */
562  public void createSplitsDir(RegionInfo daughterA, RegionInfo daughterB) throws IOException {
563    Path daughterADir = getSplitsDir(daughterA);
564    if (fs.exists(daughterADir) && !deleteDir(daughterADir)) {
565      throw new IOException("Failed deletion of " + daughterADir + " before creating them again.");
566
567    }
568    if (!createDir(daughterADir)) {
569      throw new IOException("Failed create of " + daughterADir);
570    }
571    Path daughterBDir = getSplitsDir(daughterB);
572    if (fs.exists(daughterBDir) && !deleteDir(daughterBDir)) {
573      throw new IOException("Failed deletion of " + daughterBDir + " before creating them again.");
574
575    }
576    if (!createDir(daughterBDir)) {
577      throw new IOException("Failed create of " + daughterBDir);
578    }
579  }
580
581  /**
582   * Write out a split reference. Package local so it doesnt leak out of regionserver.
583   * @param hri         {@link RegionInfo} of the destination
584   * @param familyName  Column Family Name
585   * @param f           File to split.
586   * @param splitRow    Split Row
587   * @param top         True if we are referring to the top half of the hfile.
588   * @param splitPolicy A split policy instance; be careful! May not be full populated; e.g. if this
589   *                    method is invoked on the Master side, then the RegionSplitPolicy will NOT
590   *                    have a reference to a Region.
591   * @return Path to created reference.
592   */
593  public StoreFileInfo splitStoreFile(RegionInfo hri, String familyName, HStoreFile f,
594    byte[] splitRow, boolean top, RegionSplitPolicy splitPolicy, StoreFileTracker tracker)
595    throws IOException {
596    Path splitDir = new Path(getSplitsDir(hri), familyName);
597    // Add the referred-to regions name as a dot separated suffix.
598    // See REF_NAME_REGEX regex above. The referred-to regions name is
599    // up in the path of the passed in <code>f</code> -- parentdir is family,
600    // then the directory above is the region name.
601    String parentRegionName = regionInfoForFs.getEncodedName();
602    // Write reference with same file id only with the other region name as
603    // suffix and into the new region location (under same family).
604    Path p = new Path(splitDir, f.getPath().getName() + "." + parentRegionName);
605    if (fs.exists(p)) {
606      LOG.warn("Found an already existing split file for {}. Assuming this is a recovery.", p);
607      return tracker.getStoreFileInfo(fs.getFileStatus(p), p, true);
608    }
609    boolean createLinkFile = false;
610    if (splitPolicy == null || !splitPolicy.skipStoreFileRangeCheck(familyName)) {
611      // Check whether the split row lies in the range of the store file
612      // If it is outside the range, return directly.
613      f.initReader();
614      try {
615        Cell splitKey = PrivateCellUtil.createFirstOnRow(splitRow);
616        Optional<ExtendedCell> lastKey = f.getLastKey();
617        Optional<ExtendedCell> firstKey = f.getFirstKey();
618        if (top) {
619          // check if larger than last key.
620          // If lastKey is null means storefile is empty.
621          if (!lastKey.isPresent()) {
622            return null;
623          }
624          if (f.getComparator().compare(splitKey, lastKey.get()) > 0) {
625            return null;
626          }
627          if (firstKey.isPresent() && f.getComparator().compare(splitKey, firstKey.get()) <= 0) {
628            LOG.debug("Will create HFileLink file for {}, top=true", f.getPath());
629            createLinkFile = true;
630          }
631        } else {
632          // check if smaller than first key
633          // If firstKey is null means storefile is empty.
634          if (!firstKey.isPresent()) {
635            return null;
636          }
637          if (f.getComparator().compare(splitKey, firstKey.get()) < 0) {
638            return null;
639          }
640          if (lastKey.isPresent() && f.getComparator().compare(splitKey, lastKey.get()) >= 0) {
641            LOG.debug("Will create HFileLink file for {}, top=false", f.getPath());
642            createLinkFile = true;
643          }
644        }
645      } finally {
646        f.closeStoreFile(f.getCacheConf() != null ? f.getCacheConf().shouldEvictOnClose() : true);
647      }
648    }
649    if (createLinkFile) {
650      // create HFileLink file instead of Reference file for child
651      String hfileName = f.getPath().getName();
652      TableName linkedTable = regionInfoForFs.getTable();
653      String linkedRegion = regionInfoForFs.getEncodedName();
654      try {
655        if (HFileLink.isHFileLink(hfileName)) {
656          Matcher m = LINK_NAME_PATTERN.matcher(hfileName);
657          if (!m.matches()) {
658            throw new IllegalArgumentException(hfileName + " is not a valid HFileLink name!");
659          }
660          linkedTable = TableName.valueOf(m.group(1), m.group(2));
661          linkedRegion = m.group(3);
662          hfileName = m.group(4);
663        }
664        // must create back reference here
665        HFileLink hFileLink = tracker.createHFileLink(linkedTable, linkedRegion, hfileName, true);
666        Path path =
667          new Path(splitDir, HFileLink.createHFileLinkName(linkedTable, linkedRegion, hfileName));
668        LOG.info("Created linkFile:" + path.toString() + " for child: " + hri.getEncodedName()
669          + ", parent: " + regionInfoForFs.getEncodedName());
670        return new StoreFileInfo(conf, fs, path, hFileLink);
671      } catch (IOException e) {
672        // if create HFileLink file failed, then just skip the error and create Reference file
673        LOG.error("Create link file for " + hfileName + " for child " + hri.getEncodedName()
674          + "failed, will create Reference file", e);
675      }
676    }
677    // A reference to the bottom half of the hsf store file.
678    Reference r =
679      top ? Reference.createTopReference(splitRow) : Reference.createBottomReference(splitRow);
680    tracker.createReference(r, p);
681    return new StoreFileInfo(conf, fs, p, r);
682  }
683
684  // ===========================================================================
685  // Merge Helpers
686  // ===========================================================================
687
688  Path getMergesDir(final RegionInfo hri) {
689    return new Path(getTableDir(), hri.getEncodedName());
690  }
691
692  /**
693   * Remove merged region
694   * @param mergedRegion {@link RegionInfo}
695   */
696  public void cleanupMergedRegion(final RegionInfo mergedRegion) throws IOException {
697    Path regionDir = new Path(this.tableDir, mergedRegion.getEncodedName());
698    if (this.fs.exists(regionDir) && !this.fs.delete(regionDir, true)) {
699      throw new IOException("Failed delete of " + regionDir);
700    }
701  }
702
703  static boolean mkdirs(FileSystem fs, Configuration conf, Path dir) throws IOException {
704    if (
705      FSUtils.isDistributedFileSystem(fs)
706        || !conf.getBoolean(HConstants.ENABLE_DATA_FILE_UMASK, false)
707    ) {
708      return fs.mkdirs(dir);
709    }
710    FsPermission perms = CommonFSUtils.getFilePermissions(fs, conf, HConstants.DATA_FILE_UMASK_KEY);
711    return fs.mkdirs(dir, perms);
712  }
713
714  /**
715   * Write out a merge reference under the given merges directory.
716   * @param mergingRegion {@link RegionInfo} for one of the regions being merged.
717   * @param familyName    Column Family Name
718   * @param f             File to create reference.
719   * @return Path to created reference.
720   * @throws IOException if the merge write fails.
721   */
722  public StoreFileInfo mergeStoreFile(RegionInfo mergingRegion, String familyName, HStoreFile f,
723    StoreFileTracker tracker) throws IOException {
724    Path referenceDir = new Path(getMergesDir(regionInfoForFs), familyName);
725    // A whole reference to the store file.
726    Reference r = Reference.createTopReference(mergingRegion.getStartKey());
727    // Add the referred-to regions name as a dot separated suffix.
728    // See REF_NAME_REGEX regex above. The referred-to regions name is
729    // up in the path of the passed in <code>f</code> -- parentdir is family,
730    // then the directory above is the region name.
731    String mergingRegionName = mergingRegion.getEncodedName();
732    // Write reference with same file id only with the other region name as
733    // suffix and into the new region location (under same family).
734    Path p = new Path(referenceDir, f.getPath().getName() + "." + mergingRegionName);
735    tracker.createReference(r, p);
736    StoreFileInfo storeFileInfo = new StoreFileInfo(conf, fs, p, r);
737    return storeFileInfo;
738  }
739
740  /**
741   * Commit a merged region, making it ready for use.
742   */
743  public void commitMergedRegion(List<StoreFileInfo> allMergedFiles, MasterProcedureEnv env)
744    throws IOException {
745    Path regionDir = getMergesDir(regionInfoForFs);
746    if (regionDir != null && fs.exists(regionDir)) {
747      // Write HRI to a file in case we need to recover hbase:meta
748      Path regionInfoFile = new Path(regionDir, REGION_INFO_FILE);
749      byte[] regionInfoContent = getRegionInfoFileContent(regionInfo);
750      writeRegionInfoFileContent(conf, fs, regionInfoFile, regionInfoContent);
751      insertRegionfilePathsIntoStoreTracker(allMergedFiles, env, this);
752    }
753  }
754
755  // ===========================================================================
756  // Create/Open/Delete Helpers
757  // ===========================================================================
758
  /** Returns Content of the file we write out to the filesystem under a region */
  private static byte[] getRegionInfoFileContent(final RegionInfo hri) throws IOException {
    // Protobuf-delimited serialization of the RegionInfo; this is the exact byte content of
    // the .regioninfo file, which is why callers can compare on-disk length against it.
    return RegionInfo.toDelimitedByteArray(hri);
  }
763
764  /**
765   * Create a {@link RegionInfo} from the serialized version on-disk.
766   * @param fs        {@link FileSystem} that contains the Region Info file
767   * @param regionDir {@link Path} to the Region Directory that contains the Info file
768   * @return An {@link RegionInfo} instance gotten from the Region Info file.
769   * @throws IOException if an error occurred during file open/read operation.
770   */
771  public static RegionInfo loadRegionInfoFileContent(final FileSystem fs, final Path regionDir)
772    throws IOException {
773    FSDataInputStream in = fs.open(new Path(regionDir, REGION_INFO_FILE));
774    try {
775      return RegionInfo.parseFrom(in);
776    } finally {
777      in.close();
778    }
779  }
780
781  /**
782   * Write the .regioninfo file on-disk.
783   * <p/>
784   * Overwrites if exists already.
785   */
786  private static void writeRegionInfoFileContent(final Configuration conf, final FileSystem fs,
787    final Path regionInfoFile, final byte[] content) throws IOException {
788    // First check to get the permissions
789    FsPermission perms = CommonFSUtils.getFilePermissions(fs, conf, HConstants.DATA_FILE_UMASK_KEY);
790    // Write the RegionInfo file content
791    // HBASE-29662: Fail .regioninfo file creation, if the region directory doesn't exist,
792    // avoiding silent masking of missing region directories during region initialization.
793    // The region directory should already exist when this method is called.
794    try (FSDataOutputStream out = FSUtils.create(conf, fs, regionInfoFile, perms, null, false)) {
795      out.write(content);
796    }
797  }
798
799  /**
800   * Write out an info file under the stored region directory. Useful recovering mangled regions. If
801   * the regionInfo already exists on-disk, then we fast exit.
802   */
803  void checkRegionInfoOnFilesystem() throws IOException {
804    // Compose the content of the file so we can compare to length in filesystem. If not same,
805    // rewrite it (it may have been written in the old format using Writables instead of pb). The
806    // pb version is much shorter -- we write now w/o the toString version -- so checking length
807    // only should be sufficient. I don't want to read the file every time to check if it pb
808    // serialized.
809    byte[] content = getRegionInfoFileContent(regionInfoForFs);
810
811    // Verify if the region directory exists before opening a region. We need to do this since if
812    // the region directory doesn't exist we will re-create the region directory and a new HRI
813    // when HRegion.openHRegion() is called.
814    try {
815      FileStatus status = fs.getFileStatus(getRegionDir());
816    } catch (FileNotFoundException e) {
817      LOG.warn(getRegionDir() + " doesn't exist for region: " + regionInfoForFs.getEncodedName()
818        + " on table " + regionInfo.getTable());
819    }
820
821    try {
822      Path regionInfoFile = new Path(getRegionDir(), REGION_INFO_FILE);
823      FileStatus status = fs.getFileStatus(regionInfoFile);
824      if (status != null && status.getLen() == content.length) {
825        // Then assume the content good and move on.
826        // NOTE: that the length is not sufficient to define the the content matches.
827        return;
828      }
829
830      LOG.info("Rewriting .regioninfo file at: " + regionInfoFile);
831      if (!fs.delete(regionInfoFile, false)) {
832        throw new IOException("Unable to remove existing " + regionInfoFile);
833      }
834    } catch (FileNotFoundException e) {
835      LOG.warn(REGION_INFO_FILE + " file not found for region: " + regionInfoForFs.getEncodedName()
836        + " on table " + regionInfo.getTable());
837    }
838
839    // Write HRI to a file in case we need to recover hbase:meta
840    writeRegionInfoOnFilesystem(content, true);
841  }
842
843  /**
844   * Write out an info file under the region directory. Useful recovering mangled regions.
845   * @param useTempDir indicate whether or not using the region .tmp dir for a safer file creation.
846   */
847  private void writeRegionInfoOnFilesystem(boolean useTempDir) throws IOException {
848    byte[] content = getRegionInfoFileContent(regionInfoForFs);
849    writeRegionInfoOnFilesystem(content, useTempDir);
850  }
851
852  /**
853   * Write out an info file under the region directory. Useful recovering mangled regions.
854   * @param regionInfoContent serialized version of the {@link RegionInfo}
855   * @param useTempDir        indicate whether or not using the region .tmp dir for a safer file
856   *                          creation.
857   */
858  private void writeRegionInfoOnFilesystem(final byte[] regionInfoContent, final boolean useTempDir)
859    throws IOException {
860    Path regionInfoFile = new Path(getRegionDir(), REGION_INFO_FILE);
861    if (useTempDir) {
862      // Create in tmpDir and then move into place in case we crash after
863      // create but before close. If we don't successfully close the file,
864      // subsequent region reopens will fail the below because create is
865      // registered in NN.
866
867      // And then create the file
868      Path tmpPath = new Path(getTempDir(), REGION_INFO_FILE);
869
870      // If datanode crashes or if the RS goes down just before the close is called while trying to
871      // close the created regioninfo file in the .tmp directory then on next
872      // creation we will be getting AlreadyCreatedException.
873      // Hence delete and create the file if exists.
874      if (CommonFSUtils.isExists(fs, tmpPath)) {
875        CommonFSUtils.delete(fs, tmpPath, true);
876      }
877
878      // Check parent (region) directory exists first to maintain HBASE-29662 protection
879      if (!fs.exists(getRegionDir())) {
880        throw new IOException("Region directory does not exist: " + getRegionDir());
881      }
882      if (!fs.exists(getTempDir())) {
883        fs.mkdirs(getTempDir());
884      }
885
886      // Write HRI to a file in case we need to recover hbase:meta
887      writeRegionInfoFileContent(conf, fs, tmpPath, regionInfoContent);
888
889      // Move the created file to the original path
890      if (fs.exists(tmpPath) && !rename(tmpPath, regionInfoFile)) {
891        throw new IOException("Unable to rename " + tmpPath + " to " + regionInfoFile);
892      }
893    } else {
894      // Write HRI to a file in case we need to recover hbase:meta
895      writeRegionInfoFileContent(conf, fs, regionInfoFile, regionInfoContent);
896    }
897  }
898
899  /**
900   * Create a new Region on file-system.
901   * @param conf       the {@link Configuration} to use
902   * @param fs         {@link FileSystem} from which to add the region
903   * @param tableDir   {@link Path} to where the table is being stored
904   * @param regionInfo {@link RegionInfo} for region to be added
905   * @throws IOException if the region creation fails due to a FileSystem exception.
906   */
907  public static HRegionFileSystem createRegionOnFileSystem(final Configuration conf,
908    final FileSystem fs, final Path tableDir, final RegionInfo regionInfo) throws IOException {
909    HRegionFileSystem regionFs = new HRegionFileSystem(conf, fs, tableDir, regionInfo);
910
911    // We only create a .regioninfo and the region directory if this is the default region replica
912    if (regionInfo.getReplicaId() == RegionInfo.DEFAULT_REPLICA_ID) {
913      Path regionDir = regionFs.getRegionDir();
914      if (fs.exists(regionDir)) {
915        LOG.warn("Trying to create a region that already exists on disk: " + regionDir);
916      } else {
917        // Create the region directory
918        if (!createDirOnFileSystem(fs, conf, regionDir)) {
919          LOG.warn("Unable to create the region directory: " + regionDir);
920          throw new IOException("Unable to create region directory: " + regionDir);
921        }
922      }
923
924      // Write HRI to a file in case we need to recover hbase:meta
925      regionFs.writeRegionInfoOnFilesystem(false);
926    } else {
927      if (LOG.isDebugEnabled())
928        LOG.debug("Skipping creation of .regioninfo file for " + regionInfo);
929    }
930    return regionFs;
931  }
932
933  /**
934   * Open Region from file-system.
935   * @param conf       the {@link Configuration} to use
936   * @param fs         {@link FileSystem} from which to add the region
937   * @param tableDir   {@link Path} to where the table is being stored
938   * @param regionInfo {@link RegionInfo} for region to be added
939   * @param readOnly   True if you don't want to edit the region data
940   * @throws IOException if the region creation fails due to a FileSystem exception.
941   */
942  public static HRegionFileSystem openRegionFromFileSystem(final Configuration conf,
943    final FileSystem fs, final Path tableDir, final RegionInfo regionInfo, boolean readOnly)
944    throws IOException {
945    HRegionFileSystem regionFs = new HRegionFileSystem(conf, fs, tableDir, regionInfo);
946    Path regionDir = regionFs.getRegionDir();
947
948    if (!fs.exists(regionDir)) {
949      LOG.warn("Trying to open a region that do not exists on disk: " + regionDir);
950      throw new IOException("The specified region do not exists on disk: " + regionDir);
951    }
952
953    if (!readOnly) {
954      // Cleanup temporary directories
955      regionFs.cleanupTempDir();
956
957      // If it doesn't exists, Write HRI to a file, in case we need to recover hbase:meta
958      // Only create HRI if we are the default replica
959      if (regionInfo.getReplicaId() == RegionInfo.DEFAULT_REPLICA_ID) {
960        regionFs.checkRegionInfoOnFilesystem();
961      } else {
962        if (LOG.isDebugEnabled()) {
963          LOG.debug("Skipping creation of .regioninfo file for " + regionInfo);
964        }
965      }
966    }
967
968    return regionFs;
969  }
970
971  /**
972   * Remove the region from the table directory, archiving the region's hfiles.
973   * @param conf       the {@link Configuration} to use
974   * @param fs         {@link FileSystem} from which to remove the region
975   * @param tableDir   {@link Path} to where the table is being stored
976   * @param regionInfo {@link RegionInfo} for region to be deleted
977   * @throws IOException if the request cannot be completed
978   */
979  public static void deleteRegionFromFileSystem(final Configuration conf, final FileSystem fs,
980    final Path tableDir, final RegionInfo regionInfo) throws IOException {
981    HRegionFileSystem regionFs = new HRegionFileSystem(conf, fs, tableDir, regionInfo);
982    Path regionDir = regionFs.getRegionDir();
983
984    if (!fs.exists(regionDir)) {
985      LOG.warn("Trying to delete a region that do not exists on disk: " + regionDir);
986      return;
987    }
988
989    if (LOG.isDebugEnabled()) {
990      LOG.debug("DELETING region " + regionDir);
991    }
992
993    // Archive region
994    Path rootDir = CommonFSUtils.getRootDir(conf);
995    HFileArchiver.archiveRegion(conf, fs, rootDir, tableDir, regionDir);
996
997    // Delete empty region dir
998    if (!fs.delete(regionDir, true)) {
999      LOG.warn("Failed delete of " + regionDir);
1000    }
1001  }
1002
1003  /**
1004   * Retrieves the Region ID from the given HFile path.
1005   * @param hFilePath The path of the HFile.
1006   * @return The Region ID extracted from the HFile path.
1007   * @throws IOException If an I/O error occurs or if the HFile path is incorrect.
1008   */
1009  public static String getRegionId(Path hFilePath) throws IOException {
1010    if (hFilePath.getParent() == null || hFilePath.getParent().getParent() == null) {
1011      throw new IOException("Incorrect HFile Path: " + hFilePath);
1012    }
1013    Path dir = hFilePath.getParent().getParent();
1014    if (isTemporaryDirectoryName(dir.getName())) {
1015      if (dir.getParent() == null) {
1016        throw new IOException("Incorrect HFile Path: " + hFilePath);
1017      }
1018      return dir.getParent().getName();
1019    }
1020    return dir.getName();
1021  }
1022
1023  private static boolean isTemporaryDirectoryName(String dirName) {
1024    return REGION_MERGES_DIR.equals(dirName) || REGION_SPLITS_DIR.equals(dirName)
1025      || REGION_TEMP_DIR.equals(dirName);
1026  }
1027
1028  /**
1029   * Creates a directory. Assumes the user has already checked for this directory existence.
1030   * @return the result of fs.mkdirs(). In case underlying fs throws an IOException, it checks
1031   *         whether the directory exists or not, and returns true if it exists.
1032   */
1033  boolean createDir(Path dir) throws IOException {
1034    int i = 0;
1035    IOException lastIOE = null;
1036    do {
1037      try {
1038        return mkdirs(fs, conf, dir);
1039      } catch (IOException ioe) {
1040        lastIOE = ioe;
1041        if (fs.exists(dir)) return true; // directory is present
1042        try {
1043          sleepBeforeRetry("Create Directory", i + 1);
1044        } catch (InterruptedException e) {
1045          throw (InterruptedIOException) new InterruptedIOException().initCause(e);
1046        }
1047      }
1048    } while (++i <= hdfsClientRetriesNumber);
1049    throw new IOException("Exception in createDir", lastIOE);
1050  }
1051
1052  /**
1053   * Renames a directory. Assumes the user has already checked for this directory existence.
1054   * @return true if rename is successful.
1055   */
1056  boolean rename(Path srcpath, Path dstPath) throws IOException {
1057    IOException lastIOE = null;
1058    int i = 0;
1059    do {
1060      try {
1061        return fs.rename(srcpath, dstPath);
1062      } catch (IOException ioe) {
1063        lastIOE = ioe;
1064        if (!fs.exists(srcpath) && fs.exists(dstPath)) return true; // successful move
1065        // dir is not there, retry after some time.
1066        try {
1067          sleepBeforeRetry("Rename Directory", i + 1);
1068        } catch (InterruptedException e) {
1069          throw (InterruptedIOException) new InterruptedIOException().initCause(e);
1070        }
1071      }
1072    } while (++i <= hdfsClientRetriesNumber);
1073
1074    throw new IOException("Exception in rename", lastIOE);
1075  }
1076
1077  /**
1078   * Deletes a directory. Assumes the user has already checked for this directory existence.
1079   * @return true if the directory is deleted.
1080   */
1081  boolean deleteDir(Path dir) throws IOException {
1082    IOException lastIOE = null;
1083    int i = 0;
1084    do {
1085      try {
1086        return fs.delete(dir, true);
1087      } catch (IOException ioe) {
1088        lastIOE = ioe;
1089        if (!fs.exists(dir)) return true;
1090        // dir is there, retry deleting after some time.
1091        try {
1092          sleepBeforeRetry("Delete Directory", i + 1);
1093        } catch (InterruptedException e) {
1094          throw (InterruptedIOException) new InterruptedIOException().initCause(e);
1095        }
1096      }
1097    } while (++i <= hdfsClientRetriesNumber);
1098
1099    throw new IOException("Exception in DeleteDir", lastIOE);
1100  }
1101
1102  /**
1103   * sleeping logic; handles the interrupt exception.
1104   */
1105  private void sleepBeforeRetry(String msg, int sleepMultiplier) throws InterruptedException {
1106    sleepBeforeRetry(msg, sleepMultiplier, baseSleepBeforeRetries, hdfsClientRetriesNumber);
1107  }
1108
1109  /**
1110   * Creates a directory for a filesystem and configuration object. Assumes the user has already
1111   * checked for this directory existence.
1112   * @return the result of fs.mkdirs(). In case underlying fs throws an IOException, it checks
1113   *         whether the directory exists or not, and returns true if it exists.
1114   */
1115  private static boolean createDirOnFileSystem(FileSystem fs, Configuration conf, Path dir)
1116    throws IOException {
1117    int i = 0;
1118    IOException lastIOE = null;
1119    int hdfsClientRetriesNumber =
1120      conf.getInt("hdfs.client.retries.number", DEFAULT_HDFS_CLIENT_RETRIES_NUMBER);
1121    int baseSleepBeforeRetries =
1122      conf.getInt("hdfs.client.sleep.before.retries", DEFAULT_BASE_SLEEP_BEFORE_RETRIES);
1123    do {
1124      try {
1125        return fs.mkdirs(dir);
1126      } catch (IOException ioe) {
1127        lastIOE = ioe;
1128        if (fs.exists(dir)) return true; // directory is present
1129        try {
1130          sleepBeforeRetry("Create Directory", i + 1, baseSleepBeforeRetries,
1131            hdfsClientRetriesNumber);
1132        } catch (InterruptedException e) {
1133          throw (InterruptedIOException) new InterruptedIOException().initCause(e);
1134        }
1135      }
1136    } while (++i <= hdfsClientRetriesNumber);
1137
1138    throw new IOException("Exception in createDir", lastIOE);
1139  }
1140
1141  /**
1142   * sleeping logic for static methods; handles the interrupt exception. Keeping a static version
1143   * for this to avoid re-looking for the integer values.
1144   */
1145  private static void sleepBeforeRetry(String msg, int sleepMultiplier, int baseSleepBeforeRetries,
1146    int hdfsClientRetriesNumber) throws InterruptedException {
1147    if (sleepMultiplier > hdfsClientRetriesNumber) {
1148      if (LOG.isDebugEnabled()) {
1149        LOG.debug(msg + ", retries exhausted");
1150      }
1151      return;
1152    }
1153    if (LOG.isDebugEnabled()) {
1154      LOG.debug(msg + ", sleeping " + baseSleepBeforeRetries + " times " + sleepMultiplier);
1155    }
1156    Thread.sleep((long) baseSleepBeforeRetries * sleepMultiplier);
1157  }
1158
  /**
   * Static factory for an {@link HRegionFileSystem}; simply delegates to the constructor.
   * @param conf       the {@link Configuration} to use
   * @param fs         {@link FileSystem} holding the region
   * @param tableDir   {@link Path} to where the table is stored
   * @param regionInfo {@link RegionInfo} of the region
   */
  public static HRegionFileSystem create(final Configuration conf, final FileSystem fs,
    final Path tableDir, final RegionInfo regionInfo) throws IOException {
    return new HRegionFileSystem(conf, fs, tableDir, regionInfo);
  }
1163}