/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.security.access;

import static org.apache.hadoop.fs.permission.AclEntryScope.ACCESS;
import static org.apache.hadoop.fs.permission.AclEntryScope.DEFAULT;
import static org.apache.hadoop.fs.permission.AclEntryType.GROUP;
import static org.apache.hadoop.fs.permission.AclEntryType.USER;
import static org.apache.hadoop.fs.permission.FsAction.READ_EXECUTE;

import java.io.Closeable;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Collections;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.stream.Collectors;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.AclEntry;
import org.apache.hadoop.fs.permission.AclEntryScope;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.hbase.AuthUtil;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.NamespaceDescriptor;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.SnapshotDescription;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.coprocessor.CoprocessorHost;
import org.apache.hadoop.hbase.mob.MobUtils;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
import org.apache.yetus.audience.InterfaceAudience;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import org.apache.hbase.thirdparty.com.google.common.collect.ListMultimap;
import org.apache.hbase.thirdparty.com.google.common.collect.Lists;
import org.apache.hbase.thirdparty.com.google.common.collect.Sets;
import org.apache.hbase.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder;

/**
 * A helper to modify or remove the HDFS default and access ACLs granted to HBase users over
 * hFiles.
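 * <p>
 * A minimal configuration sketch for enabling the feature; the keys are the constants defined
 * below and the coprocessor requirement mirrors {@link #isAclSyncToHdfsEnabled(Configuration)}:
 *
 * <pre>
 * Configuration conf = HBaseConfiguration.create();
 * conf.setBoolean("hbase.acl.sync.to.hdfs.enable", true);
 * // Both coprocessors must be loaded on the master for the feature to count as enabled.
 * conf.setStrings(CoprocessorHost.MASTER_COPROCESSOR_CONF_KEY,
 *   SnapshotScannerHDFSAclController.class.getName(), AccessController.class.getName());
 * </pre>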
 */
@InterfaceAudience.Private
public class SnapshotScannerHDFSAclHelper implements Closeable {
  private static final Logger LOG = LoggerFactory.getLogger(SnapshotScannerHDFSAclHelper.class);

  public static final String ACL_SYNC_TO_HDFS_ENABLE = "hbase.acl.sync.to.hdfs.enable";
  public static final String ACL_SYNC_TO_HDFS_THREAD_NUMBER =
    "hbase.acl.sync.to.hdfs.thread.number";
  // The tmp directory used to restore snapshots; it cannot be a subdirectory of the HBase root dir
  public static final String SNAPSHOT_RESTORE_TMP_DIR = "hbase.snapshot.restore.tmp.dir";
  public static final String SNAPSHOT_RESTORE_TMP_DIR_DEFAULT =
    "/hbase/.tmpdir-to-restore-snapshot";
  // The default permission of the common directories if the feature is enabled.
  public static final String COMMON_DIRECTORY_PERMISSION =
    "hbase.acl.sync.to.hdfs.common.directory.permission";
  // The secure HBase permission is 700. 751 gives all others execute access, and the mask is set
  // to read-execute so that the extended access ACL entries can take effect. Be cautious when
  // changing this value.
  public static final String COMMON_DIRECTORY_PERMISSION_DEFAULT = "751";
  // The default permission of the snapshot restore directories if the feature is enabled.
  public static final String SNAPSHOT_RESTORE_DIRECTORY_PERMISSION =
    "hbase.acl.sync.to.hdfs.restore.directory.permission";
  // 753 gives all others write-execute access.
  public static final String SNAPSHOT_RESTORE_DIRECTORY_PERMISSION_DEFAULT = "753";

  private Admin admin;
  private final Configuration conf;
  private FileSystem fs;
  private PathHelper pathHelper;
  private ExecutorService pool;

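  /**
   * @param configuration the cluster configuration
   * @param connection    the connection used to obtain the {@link Admin}; the Admin is closed by
   *                      {@link #close()}
   * @throws IOException if the root directory FileSystem cannot be obtained
   */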
  public SnapshotScannerHDFSAclHelper(Configuration configuration, Connection connection)
    throws IOException {
    this.conf = configuration;
    this.pathHelper = new PathHelper(conf);
    this.fs = pathHelper.getFileSystem();
    this.pool = Executors.newFixedThreadPool(conf.getInt(ACL_SYNC_TO_HDFS_THREAD_NUMBER, 10),
      new ThreadFactoryBuilder().setNameFormat("hdfs-acl-thread-%d").setDaemon(true).build());
    this.admin = connection.getAdmin();
  }

  @Override
  public void close() {
    if (pool != null) {
      pool.shutdown();
    }
    try {
      admin.close();
    } catch (IOException e) {
      LOG.error("Close admin error", e);
    }
  }

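  /**
   * Set the configured permission on the common directories and create the snapshot restore
   * directory with its configured permission.
   * @throws IOException if a directory cannot be created or its permission cannot be set
   */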
  public void setCommonDirectoryPermission() throws IOException {
    // Set the public directory permission to 751 so that all users have access permission.
    // The access permission of the parent of the HBase root directory is also needed, but it is
    // not set here, because the owner of the HBase root directory may not have permission to
    // change its parent's permission to 751.
    // The {root/.tmp} and {root/.tmp/data} directories are created so that global user HDFS
    // ACLs can be inherited.
    List<Path> paths = Lists.newArrayList(pathHelper.getRootDir(), pathHelper.getMobDir(),
      pathHelper.getTmpDir(), pathHelper.getArchiveDir());
    paths.addAll(getGlobalRootPaths());
    for (Path path : paths) {
      createDirIfNotExist(path);
      fs.setPermission(path, new FsPermission(
        conf.get(COMMON_DIRECTORY_PERMISSION, COMMON_DIRECTORY_PERMISSION_DEFAULT)));
    }
    // create snapshot restore directory
    Path restoreDir =
      new Path(conf.get(SNAPSHOT_RESTORE_TMP_DIR, SNAPSHOT_RESTORE_TMP_DIR_DEFAULT));
    createDirIfNotExist(restoreDir);
    fs.setPermission(restoreDir, new FsPermission(conf.get(SNAPSHOT_RESTORE_DIRECTORY_PERMISSION,
      SNAPSHOT_RESTORE_DIRECTORY_PERMISSION_DEFAULT)));
  }

  /**
   * Set the HDFS ACLs when granting a user permission.
   * @param userPermission the user and permission
   * @param skipNamespaces the namespaces to skip because their ACLs are already set
   * @param skipTables     the tables to skip because their ACLs are already set
   * @return false if an error occurred, otherwise true
   */
  public boolean grantAcl(UserPermission userPermission, Set<String> skipNamespaces,
    Set<TableName> skipTables) {
    try {
      long start = EnvironmentEdgeManager.currentTime();
      handleGrantOrRevokeAcl(userPermission, HDFSAclOperation.OperationType.MODIFY, skipNamespaces,
        skipTables);
      LOG.info("Set HDFS acl when grant {}, cost {} ms", userPermission,
        EnvironmentEdgeManager.currentTime() - start);
      return true;
    } catch (Exception e) {
      LOG.error("Set HDFS acl error when grant: {}", userPermission, e);
      return false;
    }
  }

  /**
   * Remove the HDFS ACLs when granting or revoking a user permission.
   * @param userPermission the user and permission
   * @param skipNamespaces the namespaces whose ACL removal should be skipped
   * @param skipTables     the tables whose ACL removal should be skipped
   * @return false if an error occurred, otherwise true
   */
  public boolean revokeAcl(UserPermission userPermission, Set<String> skipNamespaces,
    Set<TableName> skipTables) {
    try {
      long start = EnvironmentEdgeManager.currentTime();
      handleGrantOrRevokeAcl(userPermission, HDFSAclOperation.OperationType.REMOVE, skipNamespaces,
        skipTables);
      LOG.info("Set HDFS acl when revoke {}, cost {} ms", userPermission,
        EnvironmentEdgeManager.currentTime() - start);
      return true;
    } catch (Exception e) {
      LOG.error("Set HDFS acl error when revoke: {}", userPermission, e);
      return false;
    }
  }

  /**
   * Set the HDFS ACLs when taking a snapshot.
   * @param snapshot the snapshot description
   * @return false if an error occurred, otherwise true
   */
  public boolean snapshotAcl(SnapshotDescription snapshot) {
    try {
      long start = EnvironmentEdgeManager.currentTime();
      TableName tableName = snapshot.getTableName();
      // global user permissions are inherited from the default ACLs automatically
      Set<String> userSet = getUsersWithTableReadAction(tableName, true, false);
      if (userSet.size() > 0) {
        Path path = pathHelper.getSnapshotDir(snapshot.getName());
        handleHDFSAcl(new HDFSAclOperation(fs, path, userSet, HDFSAclOperation.OperationType.MODIFY,
          true, HDFSAclOperation.AclType.DEFAULT_AND_ACCESS)).get();
      }
      LOG.info("Set HDFS acl when snapshot {}, cost {} ms", snapshot.getName(),
        EnvironmentEdgeManager.currentTime() - start);
      return true;
    } catch (Exception e) {
      LOG.error("Set HDFS acl error when snapshot {}", snapshot, e);
      return false;
    }
  }

  /**
   * Remove the table access ACL from the namespace dir when deleting a table.
   * @param tableName   the table
   * @param removeUsers the users whose access ACLs will be removed
   * @param operation   the operation name, used for logging
   * @return false if an error occurred, otherwise true
   */
  public boolean removeNamespaceAccessAcl(TableName tableName, Set<String> removeUsers,
    String operation) {
    try {
      long start = EnvironmentEdgeManager.currentTime();
      if (removeUsers.size() > 0) {
        handleNamespaceAccessAcl(tableName.getNamespaceAsString(), removeUsers,
          HDFSAclOperation.OperationType.REMOVE);
      }
      LOG.info("Remove HDFS acl when {} table {}, cost {} ms", operation, tableName,
        EnvironmentEdgeManager.currentTime() - start);
      return true;
    } catch (Exception e) {
      LOG.error("Remove HDFS acl error when {} table {}", operation, tableName, e);
      return false;
    }
  }

  /**
   * Remove the default ACLs from the namespace archive dir when deleting a namespace.
   * @param namespace   the namespace
   * @param removeUsers the users whose default ACLs will be removed
   * @return false if an error occurred, otherwise true
   */
  public boolean removeNamespaceDefaultAcl(String namespace, Set<String> removeUsers) {
    try {
      long start = EnvironmentEdgeManager.currentTime();
      Path archiveNsDir = pathHelper.getArchiveNsDir(namespace);
      HDFSAclOperation operation = new HDFSAclOperation(fs, archiveNsDir, removeUsers,
        HDFSAclOperation.OperationType.REMOVE, false, HDFSAclOperation.AclType.DEFAULT);
      operation.handleAcl();
      LOG.info("Remove HDFS acl when delete namespace {}, cost {} ms", namespace,
        EnvironmentEdgeManager.currentTime() - start);
      return true;
    } catch (Exception e) {
      LOG.error("Remove HDFS acl error when delete namespace {}", namespace, e);
      return false;
    }
  }

  /**
   * Remove the default ACLs from the table archive dir when deleting a table.
   * @param tableName   the table name
   * @param removeUsers the users whose default ACLs will be removed
   * @return false if an error occurred, otherwise true
   */
  public boolean removeTableDefaultAcl(TableName tableName, Set<String> removeUsers) {
    try {
      long start = EnvironmentEdgeManager.currentTime();
      Path archiveTableDir = pathHelper.getArchiveTableDir(tableName);
      HDFSAclOperation operation = new HDFSAclOperation(fs, archiveTableDir, removeUsers,
        HDFSAclOperation.OperationType.REMOVE, false, HDFSAclOperation.AclType.DEFAULT);
      operation.handleAcl();
      LOG.info("Remove HDFS acl when delete table {}, cost {} ms", tableName,
        EnvironmentEdgeManager.currentTime() - start);
      return true;
    } catch (Exception e) {
      LOG.error("Remove HDFS acl error when delete table {}", tableName, e);
      return false;
    }
  }

  /**
   * Add the table users' ACLs.
   * @param tableName the table
   * @param users     the table users with READ permission
   * @param operation the operation name, used for logging
   * @return false if an error occurred, otherwise true
   */
  public boolean addTableAcl(TableName tableName, Set<String> users, String operation) {
    try {
      long start = EnvironmentEdgeManager.currentTime();
      if (users.size() > 0) {
        HDFSAclOperation.OperationType operationType = HDFSAclOperation.OperationType.MODIFY;
        handleNamespaceAccessAcl(tableName.getNamespaceAsString(), users, operationType);
        handleTableAcl(Sets.newHashSet(tableName), users, new HashSet<>(0), new HashSet<>(0),
          operationType);
      }
      LOG.info("Set HDFS acl when {} table {}, cost {} ms", operation, tableName,
        EnvironmentEdgeManager.currentTime() - start);
      return true;
    } catch (Exception e) {
      LOG.error("Set HDFS acl error when {} table {}", operation, tableName, e);
      return false;
    }
  }

  /**
   * Remove the table ACLs when modifying a table.
   * @param tableName the table
   * @param users     the table users with READ permission
   * @return false if an error occurred, otherwise true
   */
  public boolean removeTableAcl(TableName tableName, Set<String> users) {
    try {
      long start = EnvironmentEdgeManager.currentTime();
      if (users.size() > 0) {
        handleTableAcl(Sets.newHashSet(tableName), users, new HashSet<>(0), new HashSet<>(0),
          HDFSAclOperation.OperationType.REMOVE);
      }
      LOG.info("Remove HDFS acl when modify table {}, cost {} ms", tableName,
        EnvironmentEdgeManager.currentTime() - start);
      return true;
    } catch (Exception e) {
      LOG.error("Remove HDFS acl error when modify table {}", tableName, e);
      return false;
    }
  }

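  /**
   * Dispatch a grant or revoke to the handler that matches the permission's scope (GLOBAL,
   * NAMESPACE or TABLE).
   */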
  private void handleGrantOrRevokeAcl(UserPermission userPermission,
    HDFSAclOperation.OperationType operationType, Set<String> skipNamespaces,
    Set<TableName> skipTables) throws ExecutionException, InterruptedException, IOException {
    Set<String> users = Sets.newHashSet(userPermission.getUser());
    switch (userPermission.getAccessScope()) {
      case GLOBAL:
        handleGlobalAcl(users, skipNamespaces, skipTables, operationType);
        break;
      case NAMESPACE:
        NamespacePermission namespacePermission =
          (NamespacePermission) userPermission.getPermission();
        handleNamespaceAcl(Sets.newHashSet(namespacePermission.getNamespace()), users,
          skipNamespaces, skipTables, operationType);
        break;
      case TABLE:
        TablePermission tablePermission = (TablePermission) userPermission.getPermission();
        handleNamespaceAccessAcl(tablePermission.getNamespace(), users, operationType);
        handleTableAcl(Sets.newHashSet(tablePermission.getTableName()), users, skipNamespaces,
          skipTables, operationType);
        break;
      default:
        throw new IllegalArgumentException(
          "Illegal user permission scope " + userPermission.getAccessScope());
    }
  }

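  /**
   * Handle the HDFS ACLs of the global root directories in parallel, then cascade to every
   * namespace (and its tables) on the cluster.
   */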
  private void handleGlobalAcl(Set<String> users, Set<String> skipNamespaces,
    Set<TableName> skipTables, HDFSAclOperation.OperationType operationType)
    throws ExecutionException, InterruptedException, IOException {
    // handle global root directories HDFS acls
    List<HDFSAclOperation> hdfsAclOperations =
      getGlobalRootPaths().stream().map(path -> new HDFSAclOperation(fs, path, users, operationType,
        false, HDFSAclOperation.AclType.DEFAULT_AND_ACCESS)).collect(Collectors.toList());
    handleHDFSAclParallel(hdfsAclOperations).get();
    // handle namespace HDFS acls
    handleNamespaceAcl(Sets.newHashSet(admin.listNamespaces()), users, skipNamespaces, skipTables,
      operationType);
  }

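  /**
   * Handle the HDFS ACLs of the given namespaces' root directories, then of the tables (with
   * this feature enabled) that live in those namespaces.
   */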
  private void handleNamespaceAcl(Set<String> namespaces, Set<String> users,
    Set<String> skipNamespaces, Set<TableName> skipTables,
    HDFSAclOperation.OperationType operationType)
    throws ExecutionException, InterruptedException, IOException {
    namespaces.removeAll(skipNamespaces);
    namespaces.remove(NamespaceDescriptor.SYSTEM_NAMESPACE_NAME_STR);
    // handle namespace root directories HDFS acls
    List<HDFSAclOperation> hdfsAclOperations = new ArrayList<>();
    Set<String> skipTableNamespaces =
      skipTables.stream().map(TableName::getNamespaceAsString).collect(Collectors.toSet());
    for (String ns : namespaces) {
      /**
       * When op is REMOVE, remove the DEFAULT namespace ACL while keeping the ACCESS ACL for
       * skipTables, otherwise remove both the DEFAULT and ACCESS ACLs. When op is MODIFY, just
       * operate on the DEFAULT and ACCESS ACLs.
       */
      HDFSAclOperation.OperationType op = operationType;
      HDFSAclOperation.AclType aclType = HDFSAclOperation.AclType.DEFAULT_AND_ACCESS;
      if (
        operationType == HDFSAclOperation.OperationType.REMOVE && skipTableNamespaces.contains(ns)
      ) {
        // remove only the default HDFS ACLs of the namespace directories for skip tables
        op = HDFSAclOperation.OperationType.REMOVE;
        aclType = HDFSAclOperation.AclType.DEFAULT;
      }
      for (Path path : getNamespaceRootPaths(ns)) {
        hdfsAclOperations.add(new HDFSAclOperation(fs, path, users, op, false, aclType));
      }
    }
    handleHDFSAclParallel(hdfsAclOperations).get();
    // handle table directories HDFS acls
    Set<TableName> tables = new HashSet<>();
    for (String namespace : namespaces) {
      tables.addAll(admin.listTableDescriptorsByNamespace(Bytes.toBytes(namespace)).stream()
        .filter(this::isAclSyncToHdfsEnabled).map(TableDescriptor::getTableName)
        .collect(Collectors.toSet()));
    }
    handleTableAcl(tables, users, skipNamespaces, skipTables, operationType);
  }

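  /**
   * Handle the HDFS ACLs of each table's root paths (data, mob, archive and snapshot dirs)
   * recursively, one sequential chain per table, with the chains running in parallel.
   */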
  private void handleTableAcl(Set<TableName> tableNames, Set<String> users,
    Set<String> skipNamespaces, Set<TableName> skipTables,
    HDFSAclOperation.OperationType operationType)
    throws ExecutionException, InterruptedException, IOException {
    Set<TableName> filterTableNames = new HashSet<>();
    for (TableName tableName : tableNames) {
      if (
        !skipTables.contains(tableName)
          && !skipNamespaces.contains(tableName.getNamespaceAsString())
      ) {
        filterTableNames.add(tableName);
      }
    }
    List<CompletableFuture<Void>> futures = new ArrayList<>();
    // handle table HDFS acls
    for (TableName tableName : filterTableNames) {
      List<HDFSAclOperation> hdfsAclOperations = getTableRootPaths(tableName, true).stream()
        .map(path -> new HDFSAclOperation(fs, path, users, operationType, true,
          HDFSAclOperation.AclType.DEFAULT_AND_ACCESS))
        .collect(Collectors.toList());
      CompletableFuture<Void> future = handleHDFSAclSequential(hdfsAclOperations);
      futures.add(future);
    }
    CompletableFuture<Void> future =
      CompletableFuture.allOf(futures.toArray(new CompletableFuture[futures.size()]));
    future.get();
  }

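  /**
   * Handle only the access ACLs of a namespace's root paths, without recursing into the tables.
   */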
  private void handleNamespaceAccessAcl(String namespace, Set<String> users,
    HDFSAclOperation.OperationType operationType) throws ExecutionException, InterruptedException {
    // handle namespace access HDFS acls
    List<HDFSAclOperation> hdfsAclOperations =
      getNamespaceRootPaths(namespace).stream().map(path -> new HDFSAclOperation(fs, path, users,
        operationType, false, HDFSAclOperation.AclType.ACCESS)).collect(Collectors.toList());
    CompletableFuture<Void> future = handleHDFSAclParallel(hdfsAclOperations);
    future.get();
  }

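  /**
   * Create the data, mob and archive directories of a table if they do not exist yet.
   */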
  void createTableDirectories(TableName tableName) throws IOException {
    List<Path> paths = getTableRootPaths(tableName, false);
    for (Path path : paths) {
      createDirIfNotExist(path);
    }
  }

  /**
   * Return the root paths that a user with global permission will visit.
   * @return the path list
   */
  List<Path> getGlobalRootPaths() {
    return Lists.newArrayList(pathHelper.getTmpDataDir(), pathHelper.getDataDir(),
      pathHelper.getMobDataDir(), pathHelper.getArchiveDataDir(), pathHelper.getSnapshotRootDir());
  }

  /**
   * Return the root paths that a user with namespace permission will visit.
   * @param namespace the namespace
   * @return the path list
   */
  List<Path> getNamespaceRootPaths(String namespace) {
    return Lists.newArrayList(pathHelper.getTmpNsDir(namespace), pathHelper.getDataNsDir(namespace),
      pathHelper.getMobDataNsDir(namespace), pathHelper.getArchiveNsDir(namespace));
  }

  /**
   * Return the root paths that a user with table permission will visit.
   * @param tableName           the table
   * @param includeSnapshotPath true to also include the table's snapshot paths, otherwise false
   * @return the path list
   * @throws IOException if an error occurred
   */
  List<Path> getTableRootPaths(TableName tableName, boolean includeSnapshotPath)
    throws IOException {
    List<Path> paths = Lists.newArrayList(pathHelper.getDataTableDir(tableName),
      pathHelper.getMobTableDir(tableName), pathHelper.getArchiveTableDir(tableName));
    if (includeSnapshotPath) {
      paths.addAll(getTableSnapshotPaths(tableName));
    }
    return paths;
  }

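  /**
   * Return the snapshot directories of every snapshot taken of the given table.
   */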
  private List<Path> getTableSnapshotPaths(TableName tableName) throws IOException {
    return admin.listSnapshots().stream()
      .filter(snapDesc -> snapDesc.getTableName().equals(tableName))
      .map(snapshotDescription -> pathHelper.getSnapshotDir(snapshotDescription.getName()))
      .collect(Collectors.toList());
  }

  /**
   * Return users with global read permission.
   * @return users with global read permission
   * @throws IOException if an error occurred
   */
  private Set<String> getUsersWithGlobalReadAction() throws IOException {
    return getUsersWithReadAction(PermissionStorage.getGlobalPermissions(conf));
  }

  /**
   * Return users with namespace read permission.
   * @param namespace     the namespace
   * @param includeGlobal true to also include users with global read permission
   * @return users with namespace read permission
   * @throws IOException if an error occurred
   */
  Set<String> getUsersWithNamespaceReadAction(String namespace, boolean includeGlobal)
    throws IOException {
    Set<String> users =
      getUsersWithReadAction(PermissionStorage.getNamespacePermissions(conf, namespace));
    if (includeGlobal) {
      users.addAll(getUsersWithGlobalReadAction());
    }
    return users;
  }

  /**
   * Return users with table read permission.
   * @param tableName        the table
   * @param includeNamespace true to also include users with namespace read permission
   * @param includeGlobal    true to also include users with global read permission
   * @return users with table read permission
   * @throws IOException if an error occurred
   */
  Set<String> getUsersWithTableReadAction(TableName tableName, boolean includeNamespace,
    boolean includeGlobal) throws IOException {
    Set<String> users =
      getUsersWithReadAction(PermissionStorage.getTablePermissions(conf, tableName));
    if (includeNamespace) {
      users
        .addAll(getUsersWithNamespaceReadAction(tableName.getNamespaceAsString(), includeGlobal));
    }
    return users;
  }

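  /**
   * Extract the user names holding a READ permission from the permission multimap; family- or
   * qualifier-scoped table permissions do not count.
   */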
  private Set<String>
    getUsersWithReadAction(ListMultimap<String, UserPermission> permissionMultimap) {
    return permissionMultimap.entries().stream()
      .filter(entry -> checkUserPermission(entry.getValue())).map(Map.Entry::getKey)
      .collect(Collectors.toSet());
  }

  private boolean checkUserPermission(UserPermission userPermission) {
    boolean result = containReadAction(userPermission);
    if (result && userPermission.getPermission() instanceof TablePermission) {
      result = isNotFamilyOrQualifierPermission((TablePermission) userPermission.getPermission());
    }
    return result;
  }

  boolean containReadAction(UserPermission userPermission) {
    return userPermission.getPermission().implies(Permission.Action.READ);
  }

  boolean isNotFamilyOrQualifierPermission(TablePermission tablePermission) {
    return !tablePermission.hasFamily() && !tablePermission.hasQualifier();
  }

  public static boolean isAclSyncToHdfsEnabled(Configuration conf) {
    String[] masterCoprocessors = conf.getStrings(CoprocessorHost.MASTER_COPROCESSOR_CONF_KEY);
    Set<String> masterCoprocessorSet = new HashSet<>();
    if (masterCoprocessors != null) {
      Collections.addAll(masterCoprocessorSet, masterCoprocessors);
    }
    return conf.getBoolean(SnapshotScannerHDFSAclHelper.ACL_SYNC_TO_HDFS_ENABLE, false)
      && masterCoprocessorSet.contains(SnapshotScannerHDFSAclController.class.getName())
      && masterCoprocessorSet.contains(AccessController.class.getName());
  }

  boolean isAclSyncToHdfsEnabled(TableDescriptor tableDescriptor) {
    return tableDescriptor != null
      && Boolean.parseBoolean(tableDescriptor.getValue(ACL_SYNC_TO_HDFS_ENABLE));
  }

  PathHelper getPathHelper() {
    return pathHelper;
  }

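  /**
   * Apply one ACL operation asynchronously on the pool, then recurse into the operations for its
   * child paths in parallel. A missing path is silently skipped; other IO errors are logged and
   * swallowed so that sibling operations still run.
   */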
  private CompletableFuture<Void> handleHDFSAcl(HDFSAclOperation acl) {
    return CompletableFuture.supplyAsync(() -> {
      List<HDFSAclOperation> childAclOperations = new ArrayList<>();
      try {
        acl.handleAcl();
        childAclOperations = acl.getChildAclOperations();
      } catch (FileNotFoundException e) {
        // Skip handling the acl if the file is not found
      } catch (IOException e) {
        LOG.error("Set HDFS acl error for path {}", acl.path, e);
      }
      return childAclOperations;
    }, pool).thenComposeAsync(this::handleHDFSAclParallel, pool);
  }

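  /**
   * Run the given ACL operations one after another on the pool, waiting for each operation's
   * whole subtree to finish before starting the next.
   */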
  private CompletableFuture<Void> handleHDFSAclSequential(List<HDFSAclOperation> operations) {
    return CompletableFuture.supplyAsync(() -> {
      try {
        for (HDFSAclOperation hdfsAclOperation : operations) {
          handleHDFSAcl(hdfsAclOperation).get();
        }
      } catch (InterruptedException | ExecutionException e) {
        LOG.error("Set HDFS acl error", e);
      }
      return null;
    }, pool);
  }

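  /**
   * Run the given ACL operations concurrently and return a future that completes when all of
   * them (including their child operations) have finished.
   */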
  private CompletableFuture<Void> handleHDFSAclParallel(List<HDFSAclOperation> operations) {
    List<CompletableFuture<Void>> futures =
      operations.stream().map(this::handleHDFSAcl).collect(Collectors.toList());
    return CompletableFuture.allOf(futures.toArray(new CompletableFuture[futures.size()]));
  }

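  /**
   * Build a READ_EXECUTE ACL entry for the given principal in the given scope. Group principals
   * (as recognized by {@link AuthUtil#isGroupPrincipal(String)}) become GROUP entries, all other
   * names USER entries. For example, {@code aclEntry(ACCESS, "alice")} is roughly the entry that
   * {@code hdfs dfs -setfacl -m user:alice:r-x <path>} would add.
   */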
  private static AclEntry aclEntry(AclEntryScope scope, String name) {
    return new AclEntry.Builder().setScope(scope)
      .setType(AuthUtil.isGroupPrincipal(name) ? GROUP : USER).setName(name)
      .setPermission(READ_EXECUTE).build();
  }

  void createDirIfNotExist(Path path) throws IOException {
    if (!fs.exists(path)) {
      fs.mkdirs(path);
    }
  }

  void deleteEmptyDir(Path path) throws IOException {
    if (fs.exists(path) && fs.listStatus(path).length == 0) {
      fs.delete(path, false);
    }
  }

  /**
   * Inner class describing which type of ACL entries (ACCESS, DEFAULT, DEFAULT_AND_ACCESS) to
   * modify or remove for files or directories (and their child files).
   */
  private static class HDFSAclOperation {
    enum OperationType {
      MODIFY,
      REMOVE
    }

    enum AclType {
      ACCESS,
      DEFAULT,
      DEFAULT_AND_ACCESS
    }

    private interface Operation {
      void apply(FileSystem fs, Path path, List<AclEntry> aclList) throws IOException;
    }

    private FileSystem fs;
    private Path path;
    private Operation operation;
    private boolean recursive;
    private AclType aclType;
    private List<AclEntry> defaultAndAccessAclEntries;
    private List<AclEntry> accessAclEntries;
    private List<AclEntry> defaultAclEntries;

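    /**
     * Create an operation that applies the given operation type (MODIFY or REMOVE) and ACL type
     * for the given users to a path, optionally recursing into child paths.
     */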
    HDFSAclOperation(FileSystem fs, Path path, Set<String> users, OperationType operationType,
      boolean recursive, AclType aclType) {
      this.fs = fs;
      this.path = path;
      this.defaultAndAccessAclEntries = getAclEntries(AclType.DEFAULT_AND_ACCESS, users);
      this.accessAclEntries = getAclEntries(AclType.ACCESS, users);
      this.defaultAclEntries = getAclEntries(AclType.DEFAULT, users);
      if (operationType == OperationType.MODIFY) {
        operation = FileSystem::modifyAclEntries;
      } else if (operationType == OperationType.REMOVE) {
        operation = FileSystem::removeAclEntries;
      } else {
        throw new IllegalArgumentException("Illegal HDFS acl operation type: " + operationType);
      }
      this.recursive = recursive;
      this.aclType = aclType;
    }

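    /**
     * Create a child operation that inherits everything from its parent except the path.
     */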
    HDFSAclOperation(Path path, HDFSAclOperation parent) {
      this.fs = parent.fs;
      this.path = path;
      this.defaultAndAccessAclEntries = parent.defaultAndAccessAclEntries;
      this.accessAclEntries = parent.accessAclEntries;
      this.defaultAclEntries = parent.defaultAclEntries;
      this.operation = parent.operation;
      this.recursive = parent.recursive;
      this.aclType = parent.aclType;
    }

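    /**
     * Return one child operation per direct child of this operation's path, or an empty list if
     * the operation is not recursive or the path is not a directory.
     */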
    List<HDFSAclOperation> getChildAclOperations() throws IOException {
      List<HDFSAclOperation> hdfsAclOperations = new ArrayList<>();
      if (recursive && fs.isDirectory(path)) {
        FileStatus[] fileStatuses = fs.listStatus(path);
        for (FileStatus fileStatus : fileStatuses) {
          hdfsAclOperations.add(new HDFSAclOperation(fileStatus.getPath(), this));
        }
      }
      return hdfsAclOperations;
    }

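    /**
     * Apply the operation to this path. Directories get the entries selected by the ACL type;
     * plain files only get access entries, since default ACLs apply only to directories.
     */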
    void handleAcl() throws IOException {
      if (fs.exists(path)) {
        if (fs.isDirectory(path)) {
          switch (aclType) {
            case ACCESS:
              operation.apply(fs, path, accessAclEntries);
              break;
            case DEFAULT:
              operation.apply(fs, path, defaultAclEntries);
              break;
            case DEFAULT_AND_ACCESS:
              operation.apply(fs, path, defaultAndAccessAclEntries);
              break;
            default:
              throw new IllegalArgumentException("Illegal HDFS acl type: " + aclType);
          }
        } else {
          operation.apply(fs, path, accessAclEntries);
        }
      }
    }

    private List<AclEntry> getAclEntries(AclType aclType, Set<String> users) {
      List<AclEntry> aclEntries = new ArrayList<>();
      switch (aclType) {
        case ACCESS:
          for (String user : users) {
            aclEntries.add(aclEntry(ACCESS, user));
          }
          break;
        case DEFAULT:
          for (String user : users) {
            aclEntries.add(aclEntry(DEFAULT, user));
          }
          break;
        case DEFAULT_AND_ACCESS:
          for (String user : users) {
            aclEntries.add(aclEntry(ACCESS, user));
            aclEntries.add(aclEntry(DEFAULT, user));
          }
          break;
        default:
          throw new IllegalArgumentException("Illegal HDFS acl type: " + aclType);
      }
      return aclEntries;
    }
  }

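  /**
   * Computes the layout of the HBase directories (data, mob, tmp, archive and snapshot) under
   * the configured root directory.
   */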
  static final class PathHelper {
    Configuration conf;
    Path rootDir;
    Path tmpDataDir;
    Path dataDir;
    Path mobDataDir;
    Path archiveDataDir;
    Path snapshotDir;

    PathHelper(Configuration conf) {
      this.conf = conf;
      rootDir = new Path(conf.get(HConstants.HBASE_DIR));
      tmpDataDir =
        new Path(new Path(rootDir, HConstants.HBASE_TEMP_DIRECTORY), HConstants.BASE_NAMESPACE_DIR);
      dataDir = new Path(rootDir, HConstants.BASE_NAMESPACE_DIR);
      mobDataDir = new Path(MobUtils.getMobHome(rootDir), HConstants.BASE_NAMESPACE_DIR);
      archiveDataDir = new Path(new Path(rootDir, HConstants.HFILE_ARCHIVE_DIRECTORY),
        HConstants.BASE_NAMESPACE_DIR);
      snapshotDir = new Path(rootDir, HConstants.SNAPSHOT_DIR_NAME);
    }

    Path getRootDir() {
      return rootDir;
    }

    Path getDataDir() {
      return dataDir;
    }

    Path getMobDir() {
      return mobDataDir.getParent();
    }

    Path getMobDataDir() {
      return mobDataDir;
    }

    Path getTmpDir() {
      return new Path(rootDir, HConstants.HBASE_TEMP_DIRECTORY);
    }

    Path getTmpDataDir() {
      return tmpDataDir;
    }

    Path getArchiveDir() {
      return new Path(rootDir, HConstants.HFILE_ARCHIVE_DIRECTORY);
    }

    Path getArchiveDataDir() {
      return archiveDataDir;
    }

    Path getDataNsDir(String namespace) {
      return new Path(dataDir, namespace);
    }

    Path getMobDataNsDir(String namespace) {
      return new Path(mobDataDir, namespace);
    }

    Path getDataTableDir(TableName tableName) {
      return new Path(getDataNsDir(tableName.getNamespaceAsString()),
        tableName.getQualifierAsString());
    }

    Path getMobTableDir(TableName tableName) {
      return new Path(getMobDataNsDir(tableName.getNamespaceAsString()),
        tableName.getQualifierAsString());
    }

    Path getArchiveNsDir(String namespace) {
      return new Path(archiveDataDir, namespace);
    }

    Path getArchiveTableDir(TableName tableName) {
      return new Path(getArchiveNsDir(tableName.getNamespaceAsString()),
        tableName.getQualifierAsString());
    }

    Path getTmpNsDir(String namespace) {
      return new Path(tmpDataDir, namespace);
    }

    Path getTmpTableDir(TableName tableName) {
      return new Path(getTmpNsDir(tableName.getNamespaceAsString()),
        tableName.getQualifierAsString());
    }

    Path getSnapshotRootDir() {
      return snapshotDir;
    }

    Path getSnapshotDir(String snapshot) {
      return new Path(snapshotDir, snapshot);
    }

    FileSystem getFileSystem() throws IOException {
      return rootDir.getFileSystem(conf);
    }
  }
}