/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.snapshot;

import java.io.IOException;
import java.security.PrivilegedExceptionAction;
import java.util.Collections;
import java.util.concurrent.TimeUnit;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.security.User;
import org.apache.hadoop.hbase.security.access.PermissionStorage;
import org.apache.hadoop.hbase.security.access.ShadedAccessControlUtil;
import org.apache.hadoop.hbase.security.access.UserPermission;
import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
import org.apache.hadoop.hbase.util.FSUtils;
import org.apache.yetus.audience.InterfaceAudience;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import org.apache.hbase.thirdparty.com.google.common.collect.ListMultimap;
import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
import org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos.SnapshotDescription;

/**
 * Utility class to help manage {@link SnapshotDescription SnapshotDescriptions}.
 * <p>
 * Snapshots are laid out on disk like this:
 *
 * <pre>
 * /hbase/.snapshots
 *          /.tmp                &lt;---- working directory
 *          /[snapshot name]     &lt;----- completed snapshot
 * </pre>
 *
 * A completed snapshot named 'completed' then looks like the following (multiple regions, servers,
 * files, etc. are signified by '...' at the same directory depth).
 *
 * <pre>
 * /hbase/.snapshots/completed
 *                   .snapshotinfo          &lt;--- Description of the snapshot
 *                   .tableinfo             &lt;--- Copy of the tableinfo
 *                    /.logs
 *                        /[server_name]
 *                            /... [log files]
 *                         ...
 *                   /[region name]           &lt;---- All the region's information
 *                       .regioninfo          &lt;---- Copy of the HRegionInfo
 *                      /[column family name]
 *                          /[hfile name]     &lt;--- name of the hfile in the real region
 *                          ...
 *                      ...
 *                    ...
 * </pre>
 *
 * Utility methods in this class are useful for getting the correct locations for different parts
 * of the snapshot, as well as moving completed snapshots into place (see
 * {@link #completeSnapshot}) and writing the {@link SnapshotDescription} to the working snapshot
 * directory.
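 * <p>
 * A rough caller-side sketch of the typical lifecycle (variable setup and names are illustrative;
 * the real snapshot procedure also writes the table and region manifests between these steps):
 *
 * <pre>
 *   Configuration conf = HBaseConfiguration.create();
 *   Path rootDir = FSUtils.getRootDir(conf);
 *   FileSystem fs = rootDir.getFileSystem(conf);
 *   SnapshotDescription snapshot = SnapshotDescription.newBuilder()
 *       .setName("example_snapshot").setTable("example_table").build();
 *   snapshot = SnapshotDescriptionUtils.validate(snapshot, conf);
 *   Path workingDir = SnapshotDescriptionUtils.getWorkingSnapshotDir(snapshot, rootDir, conf);
 *   SnapshotDescriptionUtils.writeSnapshotInfo(snapshot, workingDir,
 *       workingDir.getFileSystem(conf));
 *   // ... table and region manifests are written here by the snapshot procedure ...
 *   SnapshotDescriptionUtils.completeSnapshot(snapshot, rootDir, workingDir, fs);
 * </pre>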
 */
@InterfaceAudience.Private
public final class SnapshotDescriptionUtils {

  /**
   * Filter that only accepts completed snapshot directories
   */
  public static class CompletedSnaphotDirectoriesFilter extends FSUtils.BlackListDirFilter {

    /**
     * @param fs {@link FileSystem} used to inspect the candidate snapshot directories
     */
    public CompletedSnaphotDirectoriesFilter(FileSystem fs) {
      super(fs, Collections.singletonList(SNAPSHOT_TMP_DIR_NAME));
    }
  }

  private static final Logger LOG = LoggerFactory.getLogger(SnapshotDescriptionUtils.class);
  /**
   * Version of the fs layout for a snapshot. Future snapshots may have different file layouts,
   * which we may need to read in differently.
   */
  public static final int SNAPSHOT_LAYOUT_VERSION = SnapshotManifestV2.DESCRIPTOR_VERSION;

  // snapshot directory constants
  /**
   * Name of the file that holds the basic information about a snapshot; it is stored directly
   * under each snapshot's directory.
   */
  public static final String SNAPSHOTINFO_FILE = ".snapshotinfo";

  /** Temporary directory under the snapshot directory to store in-progress snapshots */
  public static final String SNAPSHOT_TMP_DIR_NAME = ".tmp";

  /**
   * The configuration property that determines the filepath of the snapshot
   * base working directory
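   * <p>
   * For example, a deployment could relocate the snapshot working directory with a setting like
   * the following (the path is purely illustrative):
   * <pre>
   *   conf.set(SnapshotDescriptionUtils.SNAPSHOT_WORKING_DIR,
   *       "hdfs://namenode:8020/hbase-staging/snapshot-working");
   * </pre>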
   */
  public static final String SNAPSHOT_WORKING_DIR = "hbase.snapshot.working.dir";

  // snapshot operation values
  /** Default value if no start time is specified */
  public static final long NO_SNAPSHOT_START_TIME_SPECIFIED = 0;

  // Default value if no ttl is specified for Snapshot
  private static final long NO_SNAPSHOT_TTL_SPECIFIED = 0;

  public static final String MASTER_SNAPSHOT_TIMEOUT_MILLIS =
      "hbase.snapshot.master.timeout.millis";

  /** By default, wait 300 seconds for a snapshot to complete */
  public static final long DEFAULT_MAX_WAIT_TIME = 60000 * 5;

  private SnapshotDescriptionUtils() {
    // private constructor for utility class
  }

  /**
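   * Get the maximum amount of time (in milliseconds) the master should wait for a snapshot to
   * complete, resolved from configuration. A hypothetical caller-side sketch (the timeout value
   * below is illustrative):
   * <pre>
   *   conf.setLong(MASTER_SNAPSHOT_TIMEOUT_MILLIS, 600000); // 10 minutes
   *   long waitMillis = SnapshotDescriptionUtils.getMaxMasterTimeout(conf,
   *       SnapshotDescription.Type.FLUSH, SnapshotDescriptionUtils.DEFAULT_MAX_WAIT_TIME);
   * </pre>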
   * @param conf {@link Configuration} from which to check for the timeout
   * @param type type of snapshot being taken
   * @param defaultMaxWaitTime Default amount of time to wait, if none is in the configuration
   * @return the max amount of time the master should wait for a snapshot to complete
   */
  public static long getMaxMasterTimeout(Configuration conf, SnapshotDescription.Type type,
      long defaultMaxWaitTime) {
    String confKey;
    switch (type) {
    case DISABLED:
    default:
      confKey = MASTER_SNAPSHOT_TIMEOUT_MILLIS;
    }
    return Math.max(conf.getLong(confKey, defaultMaxWaitTime),
        conf.getLong(MASTER_SNAPSHOT_TIMEOUT_MILLIS, defaultMaxWaitTime));
  }

  /**
   * Get the snapshot root directory. All the snapshots are kept under this directory, i.e.
   * ${hbase.rootdir}/{@value HConstants#SNAPSHOT_DIR_NAME}
   * @param rootDir hbase root directory
   * @return the base directory in which all snapshots are kept
   */
  public static Path getSnapshotRootDir(final Path rootDir) {
    return new Path(rootDir, HConstants.SNAPSHOT_DIR_NAME);
  }

  /**
   * Get the directory for a specified snapshot. This directory is a sub-directory of snapshot root
   * directory and all the data files for a snapshot are kept under this directory.
   * @param snapshot snapshot being taken
   * @param rootDir hbase root directory
   * @return the final directory for the completed snapshot
   */
  public static Path getCompletedSnapshotDir(final SnapshotDescription snapshot, final Path rootDir) {
    return getCompletedSnapshotDir(snapshot.getName(), rootDir);
  }

  /**
   * Get the directory for a completed snapshot. This directory is a sub-directory of snapshot root
   * directory and all the data files for a snapshot are kept under this directory.
   * @param snapshotName name of the snapshot being taken
   * @param rootDir hbase root directory
   * @return the final directory for the completed snapshot
   */
  public static Path getCompletedSnapshotDir(final String snapshotName, final Path rootDir) {
    return getSpecifiedSnapshotDir(getSnapshotsDir(rootDir), snapshotName);
  }

  /**
   * Get the general working directory for snapshots - where they are built, where they are
   * temporarily copied on export, etc.
   * @param rootDir root directory of the HBase installation
   * @param conf Configuration of the HBase instance
   * @return Path to the snapshot tmp directory, relative to the passed root directory
   */
  public static Path getWorkingSnapshotDir(final Path rootDir, final Configuration conf) {
    return new Path(conf.get(SNAPSHOT_WORKING_DIR,
        getDefaultWorkingSnapshotDir(rootDir).toString()));
  }

  /**
   * Get the directory to build a snapshot, before it is finalized
   * @param snapshot snapshot that will be built
   * @param rootDir root directory of the hbase installation
   * @param conf Configuration of the HBase instance
   * @return {@link Path} where one can build a snapshot
   */
  public static Path getWorkingSnapshotDir(SnapshotDescription snapshot, final Path rootDir,
      Configuration conf) {
    return getWorkingSnapshotDir(snapshot.getName(), rootDir, conf);
  }

  /**
   * Get the directory to build a snapshot, before it is finalized
   * @param snapshotName name of the snapshot
   * @param rootDir root directory of the hbase installation
   * @param conf Configuration of the HBase instance
   * @return {@link Path} where one can build a snapshot
   */
  public static Path getWorkingSnapshotDir(String snapshotName, final Path rootDir,
      Configuration conf) {
    return getSpecifiedSnapshotDir(getWorkingSnapshotDir(rootDir, conf), snapshotName);
  }

  /**
   * Get the directory within the given filepath to store the snapshot instance
   * @param snapshotsDir directory to store snapshot directory within
   * @param snapshotName name of the snapshot to take
   * @return the final directory for the snapshot in the given filepath
   */
  private static final Path getSpecifiedSnapshotDir(final Path snapshotsDir, String snapshotName) {
    return new Path(snapshotsDir, snapshotName);
  }

  /**
   * @param rootDir hbase root directory
   * @return the directory for all completed snapshots
   */
  public static final Path getSnapshotsDir(Path rootDir) {
    return new Path(rootDir, HConstants.SNAPSHOT_DIR_NAME);
  }

  /**
   * Determines if the given workingDir is a subdirectory of the given "root directory"
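   * <p>
   * The check is a plain path-prefix comparison, for example (illustrative paths):
   * <pre>
   *   isSubDirectoryOf(new Path("/hbase/.tmp/s1"), new Path("/hbase")); // true
   *   isSubDirectoryOf(new Path("/hbase"), new Path("/hbase"));         // false: equal, not nested
   *   isSubDirectoryOf(new Path("/hbase-backups"), new Path("/hbase")); // false: only a name prefix
   * </pre>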
   * @param workingDir a directory to check
   * @param rootDir root directory of the HBase installation
   * @return true if the given workingDir is a subdirectory of the given root directory,
   *   false otherwise
   */
  public static boolean isSubDirectoryOf(final Path workingDir, final Path rootDir) {
    return workingDir.toString().startsWith(rootDir.toString() + Path.SEPARATOR);
  }

  /**
   * Determines if the given workingDir is a subdirectory of the default working snapshot directory
   * @param workingDir a directory to check
   * @param conf configuration for the HBase cluster
   * @return true if the given workingDir is a subdirectory of the default working directory for
   *   snapshots, false otherwise
   * @throws IOException if we can't get the root dir
   */
  public static boolean isWithinDefaultWorkingDir(final Path workingDir, Configuration conf)
    throws IOException {
    Path defaultWorkingDir = getDefaultWorkingSnapshotDir(FSUtils.getRootDir(conf));
    return workingDir.equals(defaultWorkingDir) || isSubDirectoryOf(workingDir, defaultWorkingDir);
  }

  /**
   * Get the default working directory for snapshots - where they are built, where they are
   * temporarily copied on export, etc.
   * @param rootDir root directory of the HBase installation
   * @return Path to the default snapshot tmp directory, relative to the passed root directory
   */
  private static Path getDefaultWorkingSnapshotDir(final Path rootDir) {
    return new Path(getSnapshotsDir(rootDir), SNAPSHOT_TMP_DIR_NAME);
  }

  /**
   * Convert the passed snapshot description into a 'full' snapshot description based on default
   * parameters, if none have been supplied. This resolves any 'optional' parameters that aren't
   * supplied to their default values.
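   * <p>
   * For example, a description built with only a name and a table comes back with the creation
   * time and TTL populated (sketch; the input variables are illustrative):
   * <pre>
   *   SnapshotDescription filled = SnapshotDescriptionUtils.validate(snapshot, conf);
   *   long creationTime = filled.getCreationTime(); // now non-zero
   *   long ttlSeconds = filled.getTtl();            // defaulted from configuration if unset
   * </pre>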
   * @param snapshot general snapshot descriptor
   * @param conf Configuration to read configured snapshot defaults if snapshot is not complete
   * @return a valid snapshot description
   * @throws IllegalArgumentException if the {@link SnapshotDescription} is not a complete
   *           {@link SnapshotDescription}.
   */
  public static SnapshotDescription validate(SnapshotDescription snapshot, Configuration conf)
      throws IllegalArgumentException, IOException {
    if (!snapshot.hasTable()) {
      throw new IllegalArgumentException(
          "Descriptor doesn't apply to a table, so we can't build it.");
    }

    // set the creation time, if one hasn't been set
    long time = snapshot.getCreationTime();
    if (time == SnapshotDescriptionUtils.NO_SNAPSHOT_START_TIME_SPECIFIED) {
      time = EnvironmentEdgeManager.currentTime();
      LOG.debug("Creation time not specified, setting to: {} (current time: {}).", time,
          EnvironmentEdgeManager.currentTime());
      SnapshotDescription.Builder builder = snapshot.toBuilder();
      builder.setCreationTime(time);
      snapshot = builder.build();
    }

    long ttl = snapshot.getTtl();
    // set default ttl(sec) if it is not set already or the value is out of the range
    if (ttl == SnapshotDescriptionUtils.NO_SNAPSHOT_TTL_SPECIFIED ||
        ttl > TimeUnit.MILLISECONDS.toSeconds(Long.MAX_VALUE)) {
      final long defaultSnapshotTtl = conf.getLong(HConstants.DEFAULT_SNAPSHOT_TTL_CONFIG_KEY,
          HConstants.DEFAULT_SNAPSHOT_TTL);
      if (LOG.isDebugEnabled()) {
        LOG.debug("Snapshot current TTL value: {} resetting it to default value: {}", ttl,
            defaultSnapshotTtl);
      }
      ttl = defaultSnapshotTtl;
    }
    SnapshotDescription.Builder builder = snapshot.toBuilder();
    builder.setTtl(ttl);
    snapshot = builder.build();

    // set the acl to snapshot if security feature is enabled.
    if (isSecurityAvailable(conf)) {
      snapshot = writeAclToSnapshotDescription(snapshot, conf);
    }
    return snapshot;
  }

  /**
   * Write the snapshot description into the working directory of a snapshot
   * @param snapshot description of the snapshot being taken
   * @param workingDir working directory of the snapshot
   * @param fs {@link FileSystem} on which the snapshot should be taken
   * @throws IOException if we can't reach the filesystem and the file cannot be cleaned up on
   *           failure
   */
  public static void writeSnapshotInfo(SnapshotDescription snapshot, Path workingDir, FileSystem fs)
      throws IOException {
    FsPermission perms = FSUtils.getFilePermissions(fs, fs.getConf(),
      HConstants.DATA_FILE_UMASK_KEY);
    Path snapshotInfo = new Path(workingDir, SnapshotDescriptionUtils.SNAPSHOTINFO_FILE);
    try (FSDataOutputStream out = FSUtils.create(fs, snapshotInfo, perms, true)) {
      snapshot.writeTo(out);
    } catch (IOException e) {
      // if we get an exception, try to remove the partially written snapshot info file; note that
      // the write failure is only propagated when the cleanup itself also fails
      if (!fs.delete(snapshotInfo, false)) {
        String msg = "Couldn't delete snapshot info file: " + snapshotInfo;
        LOG.error(msg, e);
        throw new IOException(msg, e);
      }
    }
  }

  /**
   * Read in the {@link SnapshotDescription} stored for the snapshot in the passed directory
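   * <p>
   * A minimal read-back sketch (assumes {@code fs} and {@code rootDir} are already resolved; the
   * snapshot name is illustrative):
   * <pre>
   *   Path completedDir =
   *       SnapshotDescriptionUtils.getCompletedSnapshotDir("example_snapshot", rootDir);
   *   SnapshotDescription desc = SnapshotDescriptionUtils.readSnapshotInfo(fs, completedDir);
   * </pre>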
   * @param fs filesystem where the snapshot was taken
   * @param snapshotDir directory where the snapshot was stored
   * @return the stored snapshot description
   * @throws CorruptedSnapshotException if the snapshot cannot be read
   */
  public static SnapshotDescription readSnapshotInfo(FileSystem fs, Path snapshotDir)
      throws CorruptedSnapshotException {
    Path snapshotInfo = new Path(snapshotDir, SNAPSHOTINFO_FILE);
    try (FSDataInputStream in = fs.open(snapshotInfo)) {
      return SnapshotDescription.parseFrom(in);
    } catch (IOException e) {
      throw new CorruptedSnapshotException("Couldn't read snapshot info from: " + snapshotInfo, e);
    }
  }

  /**
   * Move the finished snapshot to its final, publicly visible directory - this marks the snapshot
   * as 'complete'.
   * @param snapshot description of the snapshot being taken
   * @param rootdir root directory of the hbase installation
   * @param workingDir directory where the in progress snapshot was built
   * @param fs {@link FileSystem} where the snapshot was built
   * @throws SnapshotCreationException if the snapshot could not be moved
   * @throws IOException if the filesystem could not be reached
   */
  public static void completeSnapshot(SnapshotDescription snapshot, Path rootdir, Path workingDir,
      FileSystem fs) throws SnapshotCreationException, IOException {
    Path finishedDir = getCompletedSnapshotDir(snapshot, rootdir);
    LOG.debug("Snapshot is done, just moving the snapshot from {} to {}", workingDir, finishedDir);
    if (!fs.rename(workingDir, finishedDir)) {
      throw new SnapshotCreationException(
          "Failed to move working directory(" + workingDir + ") to completed directory("
              + finishedDir + ").", ProtobufUtil.createSnapshotDesc(snapshot));
    }
  }

  /**
   * Check if the user is this table snapshot's owner
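   * <p>
   * A hypothetical usage sketch, where {@code clientDesc} is the client-side
   * {@link org.apache.hadoop.hbase.client.SnapshotDescription} being checked:
   * <pre>
   *   boolean owner = SnapshotDescriptionUtils.isSnapshotOwner(clientDesc, User.getCurrent());
   * </pre>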
   * @param snapshot the table snapshot description
   * @param user the user
   * @return true if the user is the owner of the snapshot; false otherwise, or if the snapshot
   *         owner field is not present
   */
  public static boolean isSnapshotOwner(org.apache.hadoop.hbase.client.SnapshotDescription snapshot,
      User user) {
    if (user == null) return false;
    return user.getShortName().equals(snapshot.getOwner());
  }

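  /**
   * Check whether the HBase ACL (security) feature is available by probing for the ACL table.
   * @param conf configuration used to connect to the cluster
   * @return true if the ACL table ({@link PermissionStorage#ACL_TABLE_NAME}) exists, false
   *         otherwise
   * @throws IOException if the cluster cannot be reached
   */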
  public static boolean isSecurityAvailable(Configuration conf) throws IOException {
    try (Connection conn = ConnectionFactory.createConnection(conf)) {
      try (Admin admin = conn.getAdmin()) {
        return admin.tableExists(PermissionStorage.ACL_TABLE_NAME);
      }
    }
  }

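  /**
   * Read the current ACL entries for the snapshot's table as the login user and embed them in the
   * returned {@link SnapshotDescription}.
   */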
  private static SnapshotDescription writeAclToSnapshotDescription(SnapshotDescription snapshot,
      Configuration conf) throws IOException {
    ListMultimap<String, UserPermission> perms =
        User.runAsLoginUser(new PrivilegedExceptionAction<ListMultimap<String, UserPermission>>() {
          @Override
          public ListMultimap<String, UserPermission> run() throws Exception {
            return PermissionStorage.getTablePermissions(conf,
              TableName.valueOf(snapshot.getTable()));
          }
        });
    return snapshot.toBuilder()
        .setUsersAndPermissions(ShadedAccessControlUtil.toUserTablePermissions(perms)).build();
  }
}