/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.hadoop.hbase.security.access;

import static org.apache.hadoop.fs.permission.AclEntryScope.ACCESS;
import static org.apache.hadoop.fs.permission.AclEntryScope.DEFAULT;
import static org.apache.hadoop.fs.permission.AclEntryType.GROUP;
import static org.apache.hadoop.fs.permission.AclEntryType.USER;
import static org.apache.hadoop.fs.permission.FsAction.READ_EXECUTE;

import java.io.Closeable;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Collections;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.stream.Collectors;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.AclEntry;
import org.apache.hadoop.fs.permission.AclEntryScope;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.hbase.AuthUtil;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.NamespaceDescriptor;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.SnapshotDescription;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.coprocessor.CoprocessorHost;
import org.apache.hadoop.hbase.mob.MobUtils;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.yetus.audience.InterfaceAudience;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import org.apache.hbase.thirdparty.com.google.common.collect.ListMultimap;
import org.apache.hbase.thirdparty.com.google.common.collect.Lists;
import org.apache.hbase.thirdparty.com.google.common.collect.Sets;
import org.apache.hbase.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder;

/**
 * A helper to modify or remove the HDFS default and access ACLs of HBase granted users over
 * hFiles.
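 * <p>
 * A minimal usage sketch (hedged; it assumes an existing {@code Configuration conf} and open
 * {@code Connection conn}, with the feature enabled via {@code hbase.acl.sync.to.hdfs.enable}):
 *
 * <pre>{@code
 * try (SnapshotScannerHDFSAclHelper helper = new SnapshotScannerHDFSAclHelper(conf, conn)) {
 *   // set the base permissions of the common directories so the ACL entries can take effect
 *   helper.setCommonDirectoryPermission();
 * }
 * }</pre>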
 */
@InterfaceAudience.Private
public class SnapshotScannerHDFSAclHelper implements Closeable {
  private static final Logger LOG = LoggerFactory.getLogger(SnapshotScannerHDFSAclHelper.class);

  public static final String ACL_SYNC_TO_HDFS_ENABLE = "hbase.acl.sync.to.hdfs.enable";
  public static final String ACL_SYNC_TO_HDFS_THREAD_NUMBER =
      "hbase.acl.sync.to.hdfs.thread.number";
  // The tmp directory used to restore snapshots; it cannot be a subdirectory of the HBase root
  // dir
  public static final String SNAPSHOT_RESTORE_TMP_DIR = "hbase.snapshot.restore.tmp.dir";
  public static final String SNAPSHOT_RESTORE_TMP_DIR_DEFAULT =
      "/hbase/.tmpdir-to-restore-snapshot";
  // The default permission of the common directories if the feature is enabled.
  public static final String COMMON_DIRECTORY_PERMISSION =
      "hbase.acl.sync.to.hdfs.common.directory.permission";
  // The secure HBase permission is 700; 751 additionally gives all others execute access, and
  // the mask is set to read-execute so that the extended access ACL entries can take effect.
  // Be cautious when changing this value.
  public static final String COMMON_DIRECTORY_PERMISSION_DEFAULT = "751";
  // The default permission of the snapshot restore directories if the feature is enabled.
  public static final String SNAPSHOT_RESTORE_DIRECTORY_PERMISSION =
      "hbase.acl.sync.to.hdfs.restore.directory.permission";
  // 753 means all others have write-execute access.
  public static final String SNAPSHOT_RESTORE_DIRECTORY_PERMISSION_DEFAULT = "753";

  private Admin admin;
  private final Configuration conf;
  private FileSystem fs;
  private PathHelper pathHelper;
  private ExecutorService pool;

  public SnapshotScannerHDFSAclHelper(Configuration configuration, Connection connection)
      throws IOException {
    this.conf = configuration;
    this.pathHelper = new PathHelper(conf);
    this.fs = pathHelper.getFileSystem();
    this.pool = Executors.newFixedThreadPool(conf.getInt(ACL_SYNC_TO_HDFS_THREAD_NUMBER, 10),
      new ThreadFactoryBuilder().setNameFormat("hdfs-acl-thread-%d").setDaemon(true).build());
    this.admin = connection.getAdmin();
  }

  @Override
  public void close() {
    if (pool != null) {
      pool.shutdown();
    }
    try {
      admin.close();
    } catch (IOException e) {
      LOG.error("Close admin error", e);
    }
  }

  public void setCommonDirectoryPermission() throws IOException {
    // Set the public directory permission to 751 so that all users have access permission.
    // We also need the access permission of the parent of the HBase root directory, but it is
    // not set here, because the owner of the HBase root directory may not have permission to
    // change its parent's permission to 751.
    // The {root/.tmp} and {root/.tmp/data} directories are created so that global user HDFS
    // ACLs can be inherited.
    List<Path> paths = Lists.newArrayList(pathHelper.getRootDir(), pathHelper.getMobDir(),
      pathHelper.getTmpDir(), pathHelper.getArchiveDir());
    paths.addAll(getGlobalRootPaths());
    for (Path path : paths) {
      createDirIfNotExist(path);
      fs.setPermission(path, new FsPermission(
          conf.get(COMMON_DIRECTORY_PERMISSION, COMMON_DIRECTORY_PERMISSION_DEFAULT)));
    }
    // create snapshot restore directory
    Path restoreDir =
        new Path(conf.get(SNAPSHOT_RESTORE_TMP_DIR, SNAPSHOT_RESTORE_TMP_DIR_DEFAULT));
    createDirIfNotExist(restoreDir);
    fs.setPermission(restoreDir, new FsPermission(conf.get(SNAPSHOT_RESTORE_DIRECTORY_PERMISSION,
      SNAPSHOT_RESTORE_DIRECTORY_PERMISSION_DEFAULT)));
  }

  /**
   * Set ACLs when granting a user permission.
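   * <p>
   * For illustration, a hedged sketch (assuming the HBase 2.x {@code Permission} builder API;
   * {@code helper} is an instance of this class):
   *
   * <pre>{@code
   * UserPermission perm = new UserPermission("bob", Permission
   *     .newBuilder(TableName.valueOf("ns", "table")).withActions(Permission.Action.READ).build());
   * boolean ok = helper.grantAcl(perm, Collections.emptySet(), Collections.emptySet());
   * }</pre>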
   * @param userPermission the user and permission
   * @param skipNamespaces the namespaces to skip because their ACLs are already set
   * @param skipTables the tables to skip because their ACLs are already set
   * @return false if an error occurred, otherwise true
   */
  public boolean grantAcl(UserPermission userPermission, Set<String> skipNamespaces,
      Set<TableName> skipTables) {
    try {
      long start = System.currentTimeMillis();
      handleGrantOrRevokeAcl(userPermission, HDFSAclOperation.OperationType.MODIFY, skipNamespaces,
        skipTables);
      LOG.info("Set HDFS acl when grant {}, cost {} ms", userPermission,
        System.currentTimeMillis() - start);
      return true;
    } catch (Exception e) {
      LOG.error("Set HDFS acl error when grant: {}", userPermission, e);
      return false;
    }
  }

  /**
   * Remove ACLs when granting or revoking a user permission.
   * @param userPermission the user and permission
   * @param skipNamespaces the namespaces to skip removing ACLs from
   * @param skipTables the tables to skip removing ACLs from
   * @return false if an error occurred, otherwise true
   */
  public boolean revokeAcl(UserPermission userPermission, Set<String> skipNamespaces,
      Set<TableName> skipTables) {
    try {
      long start = System.currentTimeMillis();
      handleGrantOrRevokeAcl(userPermission, HDFSAclOperation.OperationType.REMOVE, skipNamespaces,
        skipTables);
      LOG.info("Remove HDFS acl when revoke {}, cost {} ms", userPermission,
        System.currentTimeMillis() - start);
      return true;
    } catch (Exception e) {
      LOG.error("Remove HDFS acl error when revoke: {}", userPermission, e);
      return false;
    }
  }

  /**
   * Set ACLs when taking a snapshot.
   * @param snapshot the snapshot description
   * @return false if an error occurred, otherwise true
   */
  public boolean snapshotAcl(SnapshotDescription snapshot) {
    try {
      long start = System.currentTimeMillis();
      TableName tableName = snapshot.getTableName();
      // global user permission can be inherited from default acl automatically
      Set<String> userSet = getUsersWithTableReadAction(tableName, true, false);
      if (!userSet.isEmpty()) {
        Path path = pathHelper.getSnapshotDir(snapshot.getName());
        handleHDFSAcl(new HDFSAclOperation(fs, path, userSet, HDFSAclOperation.OperationType.MODIFY,
            true, HDFSAclOperation.AclType.DEFAULT_AND_ACCESS)).get();
      }
      LOG.info("Set HDFS acl when snapshot {}, cost {} ms", snapshot.getName(),
        System.currentTimeMillis() - start);
      return true;
    } catch (Exception e) {
      LOG.error("Set HDFS acl error when snapshot {}", snapshot, e);
      return false;
    }
  }

  /**
   * Remove the table access ACL from the namespace dir when deleting a table.
   * @param tableName the table
   * @param removeUsers the users whose access ACLs will be removed
   * @param operation the operation name, used only for logging
   * @return false if an error occurred, otherwise true
   */
  public boolean removeNamespaceAccessAcl(TableName tableName, Set<String> removeUsers,
      String operation) {
    try {
      long start = System.currentTimeMillis();
      if (!removeUsers.isEmpty()) {
        handleNamespaceAccessAcl(tableName.getNamespaceAsString(), removeUsers,
          HDFSAclOperation.OperationType.REMOVE);
      }
      LOG.info("Remove HDFS acl when {} table {}, cost {} ms", operation, tableName,
        System.currentTimeMillis() - start);
      return true;
    } catch (Exception e) {
      LOG.error("Remove HDFS acl error when {} table {}", operation, tableName, e);
      return false;
    }
  }

  /**
   * Remove the default ACL from the namespace archive dir when deleting a namespace.
   * @param namespace the namespace
   * @param removeUsers the users whose default ACLs will be removed
   * @return false if an error occurred, otherwise true
   */
  public boolean removeNamespaceDefaultAcl(String namespace, Set<String> removeUsers) {
    try {
      long start = System.currentTimeMillis();
      Path archiveNsDir = pathHelper.getArchiveNsDir(namespace);
      HDFSAclOperation operation = new HDFSAclOperation(fs, archiveNsDir, removeUsers,
          HDFSAclOperation.OperationType.REMOVE, false, HDFSAclOperation.AclType.DEFAULT);
      operation.handleAcl();
      LOG.info("Remove HDFS acl when delete namespace {}, cost {} ms", namespace,
        System.currentTimeMillis() - start);
      return true;
    } catch (Exception e) {
      LOG.error("Remove HDFS acl error when delete namespace {}", namespace, e);
      return false;
    }
  }

  /**
   * Remove the default ACL from the table archive dir when deleting a table.
   * @param tableName the table name
   * @param removeUsers the users whose default ACLs will be removed
   * @return false if an error occurred, otherwise true
   */
  public boolean removeTableDefaultAcl(TableName tableName, Set<String> removeUsers) {
    try {
      long start = System.currentTimeMillis();
      Path archiveTableDir = pathHelper.getArchiveTableDir(tableName);
      HDFSAclOperation operation = new HDFSAclOperation(fs, archiveTableDir, removeUsers,
          HDFSAclOperation.OperationType.REMOVE, false, HDFSAclOperation.AclType.DEFAULT);
      operation.handleAcl();
      LOG.info("Remove HDFS acl when delete table {}, cost {} ms", tableName,
        System.currentTimeMillis() - start);
      return true;
    } catch (Exception e) {
      LOG.error("Remove HDFS acl error when delete table {}", tableName, e);
      return false;
    }
  }

  /**
   * Add table user ACLs.
   * @param tableName the table
   * @param users the table users with READ permission
   * @param operation the operation name, used only for logging
   * @return false if an error occurred, otherwise true
   */
  public boolean addTableAcl(TableName tableName, Set<String> users, String operation) {
    try {
      long start = System.currentTimeMillis();
      if (!users.isEmpty()) {
        HDFSAclOperation.OperationType operationType = HDFSAclOperation.OperationType.MODIFY;
        handleNamespaceAccessAcl(tableName.getNamespaceAsString(), users, operationType);
        handleTableAcl(Sets.newHashSet(tableName), users, new HashSet<>(0), new HashSet<>(0),
          operationType);
      }
      LOG.info("Set HDFS acl when {} table {}, cost {} ms", operation, tableName,
        System.currentTimeMillis() - start);
      return true;
    } catch (Exception e) {
      LOG.error("Set HDFS acl error when {} table {}", operation, tableName, e);
      return false;
    }
  }

  /**
   * Remove table ACLs when modifying a table.
   * @param tableName the table
   * @param users the table users with READ permission
   * @return false if an error occurred, otherwise true
   */
  public boolean removeTableAcl(TableName tableName, Set<String> users) {
    try {
      long start = System.currentTimeMillis();
      if (!users.isEmpty()) {
        handleTableAcl(Sets.newHashSet(tableName), users, new HashSet<>(0), new HashSet<>(0),
          HDFSAclOperation.OperationType.REMOVE);
      }
      LOG.info("Remove HDFS acl when modify table {}, cost {} ms", tableName,
        System.currentTimeMillis() - start);
      return true;
    } catch (Exception e) {
      LOG.error("Remove HDFS acl error when modify table {}", tableName, e);
      return false;
    }
  }

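  /**
   * Dispatch on the scope of the given permission: GLOBAL operates on all root and namespace
   * paths, NAMESPACE operates on one namespace and its tables, and TABLE operates on the
   * namespace access ACL plus the table's own paths.
   */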
  private void handleGrantOrRevokeAcl(UserPermission userPermission,
      HDFSAclOperation.OperationType operationType, Set<String> skipNamespaces,
      Set<TableName> skipTables) throws ExecutionException, InterruptedException, IOException {
    Set<String> users = Sets.newHashSet(userPermission.getUser());
    switch (userPermission.getAccessScope()) {
      case GLOBAL:
        handleGlobalAcl(users, skipNamespaces, skipTables, operationType);
        break;
      case NAMESPACE:
        NamespacePermission namespacePermission =
            (NamespacePermission) userPermission.getPermission();
        handleNamespaceAcl(Sets.newHashSet(namespacePermission.getNamespace()), users,
          skipNamespaces, skipTables, operationType);
        break;
      case TABLE:
        TablePermission tablePermission = (TablePermission) userPermission.getPermission();
        handleNamespaceAccessAcl(tablePermission.getNamespace(), users, operationType);
        handleTableAcl(Sets.newHashSet(tablePermission.getTableName()), users, skipNamespaces,
          skipTables, operationType);
        break;
      default:
        throw new IllegalArgumentException(
            "Illegal user permission scope " + userPermission.getAccessScope());
    }
  }

  private void handleGlobalAcl(Set<String> users, Set<String> skipNamespaces,
      Set<TableName> skipTables, HDFSAclOperation.OperationType operationType)
      throws ExecutionException, InterruptedException, IOException {
    // handle global root directories HDFS acls
    List<HDFSAclOperation> hdfsAclOperations = getGlobalRootPaths().stream()
        .map(path -> new HDFSAclOperation(fs, path, users, operationType, false,
            HDFSAclOperation.AclType.DEFAULT_AND_ACCESS))
        .collect(Collectors.toList());
    handleHDFSAclParallel(hdfsAclOperations).get();
    // handle namespace HDFS acls
    handleNamespaceAcl(Sets.newHashSet(admin.listNamespaces()), users, skipNamespaces, skipTables,
      operationType);
  }

  private void handleNamespaceAcl(Set<String> namespaces, Set<String> users,
      Set<String> skipNamespaces, Set<TableName> skipTables,
      HDFSAclOperation.OperationType operationType)
      throws ExecutionException, InterruptedException, IOException {
    namespaces.removeAll(skipNamespaces);
    namespaces.remove(NamespaceDescriptor.SYSTEM_NAMESPACE_NAME_STR);
    // handle namespace root directories HDFS acls
    List<HDFSAclOperation> hdfsAclOperations = new ArrayList<>();
    Set<String> skipTableNamespaces =
        skipTables.stream().map(TableName::getNamespaceAsString).collect(Collectors.toSet());
    for (String ns : namespaces) {
      /**
       * When op is REMOVE, remove the DEFAULT namespace ACL while keeping the ACCESS ACL for
       * skipTables; otherwise remove both the DEFAULT and ACCESS ACLs. When op is MODIFY, just
       * operate on the DEFAULT and ACCESS ACLs.
       */
      HDFSAclOperation.OperationType op = operationType;
      HDFSAclOperation.AclType aclType = HDFSAclOperation.AclType.DEFAULT_AND_ACCESS;
      if (operationType == HDFSAclOperation.OperationType.REMOVE
          && skipTableNamespaces.contains(ns)) {
        // remove namespace directories default HDFS acls for skip tables
        op = HDFSAclOperation.OperationType.REMOVE;
        aclType = HDFSAclOperation.AclType.DEFAULT;
      }
      for (Path path : getNamespaceRootPaths(ns)) {
        hdfsAclOperations.add(new HDFSAclOperation(fs, path, users, op, false, aclType));
      }
    }
    handleHDFSAclParallel(hdfsAclOperations).get();
    // handle table directories HDFS acls
    Set<TableName> tables = new HashSet<>();
    for (String namespace : namespaces) {
      tables.addAll(admin.listTableDescriptorsByNamespace(Bytes.toBytes(namespace)).stream()
          .filter(this::isAclSyncToHdfsEnabled).map(TableDescriptor::getTableName)
          .collect(Collectors.toSet()));
    }
    handleTableAcl(tables, users, skipNamespaces, skipTables, operationType);
  }

  private void handleTableAcl(Set<TableName> tableNames, Set<String> users,
      Set<String> skipNamespaces, Set<TableName> skipTables,
      HDFSAclOperation.OperationType operationType)
      throws ExecutionException, InterruptedException, IOException {
    Set<TableName> filterTableNames = new HashSet<>();
    for (TableName tableName : tableNames) {
      if (!skipTables.contains(tableName)
          && !skipNamespaces.contains(tableName.getNamespaceAsString())) {
        filterTableNames.add(tableName);
      }
    }
    List<CompletableFuture<Void>> futures = new ArrayList<>();
    // handle table HDFS acls
    for (TableName tableName : filterTableNames) {
      List<HDFSAclOperation> hdfsAclOperations = getTableRootPaths(tableName, true).stream()
          .map(path -> new HDFSAclOperation(fs, path, users, operationType, true,
              HDFSAclOperation.AclType.DEFAULT_AND_ACCESS))
          .collect(Collectors.toList());
      CompletableFuture<Void> future = handleHDFSAclSequential(hdfsAclOperations);
      futures.add(future);
    }
    CompletableFuture<Void> future =
        CompletableFuture.allOf(futures.toArray(new CompletableFuture[futures.size()]));
    future.get();
  }

  private void handleNamespaceAccessAcl(String namespace, Set<String> users,
      HDFSAclOperation.OperationType operationType)
      throws ExecutionException, InterruptedException {
    // handle namespace access HDFS acls
    List<HDFSAclOperation> hdfsAclOperations =
        getNamespaceRootPaths(namespace).stream().map(path -> new HDFSAclOperation(fs, path, users,
            operationType, false, HDFSAclOperation.AclType.ACCESS)).collect(Collectors.toList());
    CompletableFuture<Void> future = handleHDFSAclParallel(hdfsAclOperations);
    future.get();
  }

  void createTableDirectories(TableName tableName) throws IOException {
    List<Path> paths = getTableRootPaths(tableName, false);
    for (Path path : paths) {
      createDirIfNotExist(path);
    }
  }

  /**
   * Return the paths that a user with global permission will visit.
   * @return the path list
   */
  List<Path> getGlobalRootPaths() {
    return Lists.newArrayList(pathHelper.getTmpDataDir(), pathHelper.getDataDir(),
      pathHelper.getMobDataDir(), pathHelper.getArchiveDataDir(), pathHelper.getSnapshotRootDir());
  }

  /**
   * Return the paths that a user with namespace permission will visit.
   * @param namespace the namespace
   * @return the path list
   */
  List<Path> getNamespaceRootPaths(String namespace) {
    return Lists.newArrayList(pathHelper.getTmpNsDir(namespace), pathHelper.getDataNsDir(namespace),
      pathHelper.getMobDataNsDir(namespace), pathHelper.getArchiveNsDir(namespace));
  }

  /**
   * Return the paths that a user with table permission will visit.
   * @param tableName the table
   * @param includeSnapshotPath true to also include the table's snapshot paths
   * @return the path list
   * @throws IOException if an error occurred
   */
  List<Path> getTableRootPaths(TableName tableName, boolean includeSnapshotPath)
      throws IOException {
    List<Path> paths = Lists.newArrayList(pathHelper.getTmpTableDir(tableName),
      pathHelper.getDataTableDir(tableName), pathHelper.getMobTableDir(tableName),
      pathHelper.getArchiveTableDir(tableName));
    if (includeSnapshotPath) {
      paths.addAll(getTableSnapshotPaths(tableName));
    }
    return paths;
  }

  private List<Path> getTableSnapshotPaths(TableName tableName) throws IOException {
    return admin.listSnapshots().stream()
        .filter(snapDesc -> snapDesc.getTableName().equals(tableName))
        .map(snapshotDescription -> pathHelper.getSnapshotDir(snapshotDescription.getName()))
        .collect(Collectors.toList());
  }

  /**
   * Return users with global read permission
   * @return users with global read permission
   * @throws IOException if an error occurred
   */
  private Set<String> getUsersWithGlobalReadAction() throws IOException {
    return getUsersWithReadAction(PermissionStorage.getGlobalPermissions(conf));
  }

  /**
   * Return users with namespace read permission
   * @param namespace the namespace
   * @param includeGlobal true to also include users with global read permission
   * @return users with namespace read permission
   * @throws IOException if an error occurred
   */
  Set<String> getUsersWithNamespaceReadAction(String namespace, boolean includeGlobal)
      throws IOException {
    Set<String> users =
        getUsersWithReadAction(PermissionStorage.getNamespacePermissions(conf, namespace));
    if (includeGlobal) {
      users.addAll(getUsersWithGlobalReadAction());
    }
    return users;
  }

  /**
   * Return users with table read permission
   * @param tableName the table
   * @param includeNamespace true to also include users with namespace read permission
   * @param includeGlobal true to also include users with global read permission
   * @return users with table read permission
   * @throws IOException if an error occurred
   */
  Set<String> getUsersWithTableReadAction(TableName tableName, boolean includeNamespace,
      boolean includeGlobal) throws IOException {
    Set<String> users =
        getUsersWithReadAction(PermissionStorage.getTablePermissions(conf, tableName));
    if (includeNamespace) {
      users
          .addAll(getUsersWithNamespaceReadAction(tableName.getNamespaceAsString(), includeGlobal));
    }
    return users;
  }

  private Set<String>
      getUsersWithReadAction(ListMultimap<String, UserPermission> permissionMultimap) {
    return permissionMultimap.entries().stream()
        .filter(entry -> checkUserPermission(entry.getValue())).map(Map.Entry::getKey)
        .collect(Collectors.toSet());
  }

  private boolean checkUserPermission(UserPermission userPermission) {
    boolean result = containReadAction(userPermission);
    if (result && userPermission.getPermission() instanceof TablePermission) {
      result = isNotFamilyOrQualifierPermission((TablePermission) userPermission.getPermission());
    }
    return result;
  }

  boolean containReadAction(UserPermission userPermission) {
    return userPermission.getPermission().implies(Permission.Action.READ);
  }

  boolean isNotFamilyOrQualifierPermission(TablePermission tablePermission) {
    return !tablePermission.hasFamily() && !tablePermission.hasQualifier();
  }

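  /**
   * Check whether the HDFS ACL sync feature is enabled: the enable flag must be set and both
   * SnapshotScannerHDFSAclController and AccessController must be configured as master
   * coprocessors.
   * <p>
   * For illustration, a hedged configuration sketch (the coprocessor ordering shown is only an
   * example):
   *
   * <pre>{@code
   * conf.setBoolean("hbase.acl.sync.to.hdfs.enable", true);
   * conf.set(CoprocessorHost.MASTER_COPROCESSOR_CONF_KEY, AccessController.class.getName()
   *     + "," + SnapshotScannerHDFSAclController.class.getName());
   * }</pre>
   */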
  public static boolean isAclSyncToHdfsEnabled(Configuration conf) {
    String[] masterCoprocessors = conf.getStrings(CoprocessorHost.MASTER_COPROCESSOR_CONF_KEY);
    Set<String> masterCoprocessorSet = new HashSet<>();
    if (masterCoprocessors != null) {
      Collections.addAll(masterCoprocessorSet, masterCoprocessors);
    }
    return conf.getBoolean(SnapshotScannerHDFSAclHelper.ACL_SYNC_TO_HDFS_ENABLE, false)
        && masterCoprocessorSet.contains(SnapshotScannerHDFSAclController.class.getName())
        && masterCoprocessorSet.contains(AccessController.class.getName());
  }

  boolean isAclSyncToHdfsEnabled(TableDescriptor tableDescriptor) {
    return tableDescriptor != null
        && Boolean.parseBoolean(tableDescriptor.getValue(ACL_SYNC_TO_HDFS_ENABLE));
  }

  PathHelper getPathHelper() {
    return pathHelper;
  }

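  // The three handleHDFSAcl* methods below form a small async pipeline on the shared thread
  // pool: handleHDFSAcl applies one operation to a path and then fans out to the child paths in
  // parallel, handleHDFSAclSequential chains operations one after another, and
  // handleHDFSAclParallel runs a batch concurrently and joins on the combined future.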
  private CompletableFuture<Void> handleHDFSAcl(HDFSAclOperation acl) {
    return CompletableFuture.supplyAsync(() -> {
      List<HDFSAclOperation> childAclOperations = new ArrayList<>();
      try {
        acl.handleAcl();
        childAclOperations = acl.getChildAclOperations();
      } catch (FileNotFoundException e) {
        // Skip handling the acl if the file is not found
      } catch (IOException e) {
        LOG.error("Set HDFS acl error for path {}", acl.path, e);
      }
      return childAclOperations;
    }, pool).thenComposeAsync(this::handleHDFSAclParallel, pool);
  }

  private CompletableFuture<Void> handleHDFSAclSequential(List<HDFSAclOperation> operations) {
    return CompletableFuture.supplyAsync(() -> {
      try {
        for (HDFSAclOperation hdfsAclOperation : operations) {
          handleHDFSAcl(hdfsAclOperation).get();
        }
      } catch (InterruptedException | ExecutionException e) {
        LOG.error("Set HDFS acl error", e);
      }
      return null;
    }, pool);
  }

  private CompletableFuture<Void> handleHDFSAclParallel(List<HDFSAclOperation> operations) {
    List<CompletableFuture<Void>> futures =
        operations.stream().map(this::handleHDFSAcl).collect(Collectors.toList());
    return CompletableFuture.allOf(futures.toArray(new CompletableFuture[futures.size()]));
  }

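  // Build a READ_EXECUTE ACL entry for the given scope; names carrying the AuthUtil group
  // prefix ('@') are treated as group principals, all other names as user principals.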
  private static AclEntry aclEntry(AclEntryScope scope, String name) {
    return new AclEntry.Builder().setScope(scope)
        .setType(AuthUtil.isGroupPrincipal(name) ? GROUP : USER).setName(name)
        .setPermission(READ_EXECUTE).build();
  }

  void createDirIfNotExist(Path path) throws IOException {
    if (!fs.exists(path)) {
      fs.mkdirs(path);
    }
  }

  void deleteEmptyDir(Path path) throws IOException {
    if (fs.exists(path) && fs.listStatus(path).length == 0) {
      fs.delete(path, false);
    }
  }

  /**
   * Inner class used to describe the modification or removal of ACL entries of a given type
   * (ACCESS, DEFAULT, or DEFAULT_AND_ACCESS) for files or directories (and their child files).
   */
  private static class HDFSAclOperation {
    enum OperationType {
      MODIFY, REMOVE
    }

    enum AclType {
      ACCESS, DEFAULT, DEFAULT_AND_ACCESS
    }

    private interface Operation {
      void apply(FileSystem fs, Path path, List<AclEntry> aclList) throws IOException;
    }

    private FileSystem fs;
    private Path path;
    private Operation operation;
    private boolean recursive;
    private AclType aclType;
    private List<AclEntry> defaultAndAccessAclEntries;
    private List<AclEntry> accessAclEntries;
    private List<AclEntry> defaultAclEntries;

    HDFSAclOperation(FileSystem fs, Path path, Set<String> users, OperationType operationType,
        boolean recursive, AclType aclType) {
      this.fs = fs;
      this.path = path;
      this.defaultAndAccessAclEntries = getAclEntries(AclType.DEFAULT_AND_ACCESS, users);
      this.accessAclEntries = getAclEntries(AclType.ACCESS, users);
      this.defaultAclEntries = getAclEntries(AclType.DEFAULT, users);
      if (operationType == OperationType.MODIFY) {
        operation = FileSystem::modifyAclEntries;
      } else if (operationType == OperationType.REMOVE) {
        operation = FileSystem::removeAclEntries;
      } else {
        throw new IllegalArgumentException("Illegal HDFS acl operation type: " + operationType);
      }
      this.recursive = recursive;
      this.aclType = aclType;
    }

    HDFSAclOperation(Path path, HDFSAclOperation parent) {
      this.fs = parent.fs;
      this.path = path;
      this.defaultAndAccessAclEntries = parent.defaultAndAccessAclEntries;
      this.accessAclEntries = parent.accessAclEntries;
      this.defaultAclEntries = parent.defaultAclEntries;
      this.operation = parent.operation;
      this.recursive = parent.recursive;
      this.aclType = parent.aclType;
    }

    List<HDFSAclOperation> getChildAclOperations() throws IOException {
      List<HDFSAclOperation> hdfsAclOperations = new ArrayList<>();
      if (recursive && fs.isDirectory(path)) {
        FileStatus[] fileStatuses = fs.listStatus(path);
        for (FileStatus fileStatus : fileStatuses) {
          hdfsAclOperations.add(new HDFSAclOperation(fileStatus.getPath(), this));
        }
      }
      return hdfsAclOperations;
    }

    void handleAcl() throws IOException {
      if (fs.exists(path)) {
        if (fs.isDirectory(path)) {
          switch (aclType) {
            case ACCESS:
              operation.apply(fs, path, accessAclEntries);
              break;
            case DEFAULT:
              operation.apply(fs, path, defaultAclEntries);
              break;
            case DEFAULT_AND_ACCESS:
              operation.apply(fs, path, defaultAndAccessAclEntries);
              break;
            default:
              throw new IllegalArgumentException("Illegal HDFS acl type: " + aclType);
          }
        } else {
          operation.apply(fs, path, accessAclEntries);
        }
      }
    }

    private List<AclEntry> getAclEntries(AclType aclType, Set<String> users) {
      List<AclEntry> aclEntries = new ArrayList<>();
      switch (aclType) {
        case ACCESS:
          for (String user : users) {
            aclEntries.add(aclEntry(ACCESS, user));
          }
          break;
        case DEFAULT:
          for (String user : users) {
            aclEntries.add(aclEntry(DEFAULT, user));
          }
          break;
        case DEFAULT_AND_ACCESS:
          for (String user : users) {
            aclEntries.add(aclEntry(ACCESS, user));
            aclEntries.add(aclEntry(DEFAULT, user));
          }
          break;
        default:
          throw new IllegalArgumentException("Illegal HDFS acl type: " + aclType);
      }
      return aclEntries;
    }
  }

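  /**
   * Computes the directory layout used by this helper. As a hedged illustration, with
   * {@code hbase.rootdir = /hbase} the resolved paths are expected to be:
   *
   * <pre>
   * rootDir        /hbase
   * tmpDataDir     /hbase/.tmp/data
   * dataDir        /hbase/data
   * mobDataDir     /hbase/mobdir/data
   * archiveDataDir /hbase/archive/data
   * snapshotDir    /hbase/.hbase-snapshot
   * </pre>
   */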
  static final class PathHelper {
    Configuration conf;
    Path rootDir;
    Path tmpDataDir;
    Path dataDir;
    Path mobDataDir;
    Path archiveDataDir;
    Path snapshotDir;

    PathHelper(Configuration conf) {
      this.conf = conf;
      rootDir = new Path(conf.get(HConstants.HBASE_DIR));
      tmpDataDir = new Path(new Path(rootDir, HConstants.HBASE_TEMP_DIRECTORY),
          HConstants.BASE_NAMESPACE_DIR);
      dataDir = new Path(rootDir, HConstants.BASE_NAMESPACE_DIR);
      mobDataDir = new Path(MobUtils.getMobHome(rootDir), HConstants.BASE_NAMESPACE_DIR);
      archiveDataDir = new Path(new Path(rootDir, HConstants.HFILE_ARCHIVE_DIRECTORY),
          HConstants.BASE_NAMESPACE_DIR);
      snapshotDir = new Path(rootDir, HConstants.SNAPSHOT_DIR_NAME);
    }

    Path getRootDir() {
      return rootDir;
    }

    Path getDataDir() {
      return dataDir;
    }

    Path getMobDir() {
      return mobDataDir.getParent();
    }

    Path getMobDataDir() {
      return mobDataDir;
    }

    Path getTmpDir() {
      return new Path(rootDir, HConstants.HBASE_TEMP_DIRECTORY);
    }

    Path getTmpDataDir() {
      return tmpDataDir;
    }

    Path getArchiveDir() {
      return new Path(rootDir, HConstants.HFILE_ARCHIVE_DIRECTORY);
    }

    Path getArchiveDataDir() {
      return archiveDataDir;
    }

    Path getDataNsDir(String namespace) {
      return new Path(dataDir, namespace);
    }

    Path getMobDataNsDir(String namespace) {
      return new Path(mobDataDir, namespace);
    }

    Path getDataTableDir(TableName tableName) {
      return new Path(getDataNsDir(tableName.getNamespaceAsString()),
          tableName.getQualifierAsString());
    }

    Path getMobTableDir(TableName tableName) {
      return new Path(getMobDataNsDir(tableName.getNamespaceAsString()),
          tableName.getQualifierAsString());
    }

    Path getArchiveNsDir(String namespace) {
      return new Path(archiveDataDir, namespace);
    }

    Path getArchiveTableDir(TableName tableName) {
      return new Path(getArchiveNsDir(tableName.getNamespaceAsString()),
          tableName.getQualifierAsString());
    }

    Path getTmpNsDir(String namespace) {
      return new Path(tmpDataDir, namespace);
    }

    Path getTmpTableDir(TableName tableName) {
      return new Path(getTmpNsDir(tableName.getNamespaceAsString()),
          tableName.getQualifierAsString());
    }

    Path getSnapshotRootDir() {
      return snapshotDir;
    }

    Path getSnapshotDir(String snapshot) {
      return new Path(snapshotDir, snapshot);
    }

    FileSystem getFileSystem() throws IOException {
      return rootDir.getFileSystem(conf);
    }
  }
}