/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.hadoop.hbase.security.access;

import static org.apache.hadoop.fs.permission.AclEntryScope.ACCESS;
import static org.apache.hadoop.fs.permission.AclEntryScope.DEFAULT;
import static org.apache.hadoop.fs.permission.AclEntryType.GROUP;
import static org.apache.hadoop.fs.permission.AclEntryType.USER;
import static org.apache.hadoop.fs.permission.FsAction.READ_EXECUTE;

import java.io.Closeable;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Collections;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.stream.Collectors;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.AclEntry;
import org.apache.hadoop.fs.permission.AclEntryScope;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.hbase.AuthUtil;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.NamespaceDescriptor;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.SnapshotDescription;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.coprocessor.CoprocessorHost;
import org.apache.hadoop.hbase.mob.MobUtils;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.yetus.audience.InterfaceAudience;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import org.apache.hbase.thirdparty.com.google.common.collect.ListMultimap;
import org.apache.hbase.thirdparty.com.google.common.collect.Lists;
import org.apache.hbase.thirdparty.com.google.common.collect.Sets;
import org.apache.hbase.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder;

/**
 * A helper to set or remove the default and access HDFS ACLs over hFiles for users who have been
 * granted read permission in HBase.
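 *
 * <p>A minimal usage sketch (hedged: it only illustrates the {@link Closeable} contract; the
 * caller supplies the live {@code Connection}):
 *
 * <pre>{@code
 * Configuration conf = HBaseConfiguration.create();
 * try (Connection conn = ConnectionFactory.createConnection(conf);
 *     SnapshotScannerHDFSAclHelper helper = new SnapshotScannerHDFSAclHelper(conf, conn)) {
 *   helper.setCommonDirectoryPermission();
 * }
 * }</pre>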
 */
@InterfaceAudience.Private
public class SnapshotScannerHDFSAclHelper implements Closeable {
  private static final Logger LOG = LoggerFactory.getLogger(SnapshotScannerHDFSAclHelper.class);

  public static final String ACL_SYNC_TO_HDFS_ENABLE = "hbase.acl.sync.to.hdfs.enable";
  public static final String ACL_SYNC_TO_HDFS_THREAD_NUMBER =
      "hbase.acl.sync.to.hdfs.thread.number";
  // The tmp directory used to restore snapshots; it must not be a subdirectory of the HBase
  // root directory
  public static final String SNAPSHOT_RESTORE_TMP_DIR = "hbase.snapshot.restore.tmp.dir";
  public static final String SNAPSHOT_RESTORE_TMP_DIR_DEFAULT =
      "/hbase/.tmpdir-to-restore-snapshot";
  // The default permission of the common directories if the feature is enabled.
  public static final String COMMON_DIRECTORY_PERMISSION =
      "hbase.acl.sync.to.hdfs.common.directory.permission";
  // The secure HBase permission is 700. 751 gives all others execute access, and the mask is set
  // to read-execute so that the extended access ACL entries take effect. Be cautious when
  // changing this value.
  public static final String COMMON_DIRECTORY_PERMISSION_DEFAULT = "751";
  // The default permission of the snapshot restore directories if the feature is enabled.
  public static final String SNAPSHOT_RESTORE_DIRECTORY_PERMISSION =
      "hbase.acl.sync.to.hdfs.restore.directory.permission";
  // 753 gives all others write-execute access.
  public static final String SNAPSHOT_RESTORE_DIRECTORY_PERMISSION_DEFAULT = "753";
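  // A hedged configuration sketch (the keys are the constants above; the values are
  // illustrative, and per isAclSyncToHdfsEnabled below the feature also needs AccessController
  // and SnapshotScannerHDFSAclController registered as master coprocessors):
  //   conf.setBoolean(ACL_SYNC_TO_HDFS_ENABLE, true);
  //   conf.setInt(ACL_SYNC_TO_HDFS_THREAD_NUMBER, 10);
  //   conf.set(SNAPSHOT_RESTORE_TMP_DIR, SNAPSHOT_RESTORE_TMP_DIR_DEFAULT);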
 
  private Admin admin;
  private final Configuration conf;
  private FileSystem fs;
  private PathHelper pathHelper;
  private ExecutorService pool;

  public SnapshotScannerHDFSAclHelper(Configuration configuration, Connection connection)
      throws IOException {
    this.conf = configuration;
    this.pathHelper = new PathHelper(conf);
    this.fs = pathHelper.getFileSystem();
    this.pool = Executors.newFixedThreadPool(conf.getInt(ACL_SYNC_TO_HDFS_THREAD_NUMBER, 10),
      new ThreadFactoryBuilder().setNameFormat("hdfs-acl-thread-%d").setDaemon(true).build());
    this.admin = connection.getAdmin();
  }

  @Override
  public void close() {
    if (pool != null) {
      pool.shutdown();
    }
    admin.close();
  }

  public void setCommonDirectoryPermission() throws IOException {
    // Set the public directory permission to 751 so that all users have access permission.
    // The access permission of the parent of the HBase root directory is also needed, but it is
    // not set here, because the owner of the HBase root directory may not have permission to
    // change the parent's permission to 751.
    // The {root/.tmp} and {root/.tmp/data} directories are created so that global user HDFS
    // ACLs can be inherited.
    List<Path> paths = Lists.newArrayList(pathHelper.getRootDir(), pathHelper.getMobDir(),
      pathHelper.getTmpDir(), pathHelper.getArchiveDir());
    paths.addAll(getGlobalRootPaths());
    for (Path path : paths) {
      createDirIfNotExist(path);
      fs.setPermission(path, new FsPermission(
          conf.get(COMMON_DIRECTORY_PERMISSION, COMMON_DIRECTORY_PERMISSION_DEFAULT)));
    }
    // create snapshot restore directory
    Path restoreDir =
        new Path(conf.get(SNAPSHOT_RESTORE_TMP_DIR, SNAPSHOT_RESTORE_TMP_DIR_DEFAULT));
    createDirIfNotExist(restoreDir);
    fs.setPermission(restoreDir, new FsPermission(conf.get(SNAPSHOT_RESTORE_DIRECTORY_PERMISSION,
      SNAPSHOT_RESTORE_DIRECTORY_PERMISSION_DEFAULT)));
  }

  /**
   * Set ACLs when a user permission is granted
   * @param userPermission the user and permission
   * @param skipNamespaces the namespaces to skip because their ACLs are already set
   * @param skipTables the tables to skip because their ACLs are already set
   * @return false if an error occurred, otherwise true
   */
  public boolean grantAcl(UserPermission userPermission, Set<String> skipNamespaces,
      Set<TableName> skipTables) {
    try {
      long start = System.currentTimeMillis();
      handleGrantOrRevokeAcl(userPermission, HDFSAclOperation.OperationType.MODIFY, skipNamespaces,
        skipTables);
      LOG.info("Set HDFS acl when grant {}, cost {} ms", userPermission,
        System.currentTimeMillis() - start);
      return true;
    } catch (Exception e) {
      LOG.error("Set HDFS acl error when grant: {}", userPermission, e);
      return false;
    }
  }

  /**
   * Remove ACLs when a user permission is granted or revoked
   * @param userPermission the user and permission
   * @param skipNamespaces the namespaces to skip removing ACLs for
   * @param skipTables the tables to skip removing ACLs for
   * @return false if an error occurred, otherwise true
   */
  public boolean revokeAcl(UserPermission userPermission, Set<String> skipNamespaces,
      Set<TableName> skipTables) {
    try {
      long start = System.currentTimeMillis();
      handleGrantOrRevokeAcl(userPermission, HDFSAclOperation.OperationType.REMOVE, skipNamespaces,
        skipTables);
      LOG.info("Remove HDFS acl when revoke {}, cost {} ms", userPermission,
        System.currentTimeMillis() - start);
      return true;
    } catch (Exception e) {
      LOG.error("Remove HDFS acl error when revoke: {}", userPermission, e);
      return false;
    }
  }

  /**
   * Set ACLs when a snapshot is taken
   * @param snapshot the snapshot description
   * @return false if an error occurred, otherwise true
   */
  public boolean snapshotAcl(SnapshotDescription snapshot) {
    try {
      long start = System.currentTimeMillis();
      TableName tableName = snapshot.getTableName();
      // global user permissions can be inherited from default acls automatically
      Set<String> userSet = getUsersWithTableReadAction(tableName, true, false);
      if (userSet.size() > 0) {
        Path path = pathHelper.getSnapshotDir(snapshot.getName());
        handleHDFSAcl(new HDFSAclOperation(fs, path, userSet, HDFSAclOperation.OperationType.MODIFY,
            true, HDFSAclOperation.AclType.DEFAULT_AND_ACCESS)).get();
      }
      LOG.info("Set HDFS acl when snapshot {}, cost {} ms", snapshot.getName(),
        System.currentTimeMillis() - start);
      return true;
    } catch (Exception e) {
      LOG.error("Set HDFS acl error when snapshot {}", snapshot, e);
      return false;
    }
  }

  /**
   * Remove the table access ACL from the namespace dir when a table is deleted
   * @param tableName the table
   * @param removeUsers the users whose access ACLs will be removed
   * @param operation the operation name, used for logging
   * @return false if an error occurred, otherwise true
   */
  public boolean removeNamespaceAccessAcl(TableName tableName, Set<String> removeUsers,
      String operation) {
    try {
      long start = System.currentTimeMillis();
      if (removeUsers.size() > 0) {
        handleNamespaceAccessAcl(tableName.getNamespaceAsString(), removeUsers,
          HDFSAclOperation.OperationType.REMOVE);
      }
      LOG.info("Remove HDFS acl when {} table {}, cost {} ms", operation, tableName,
        System.currentTimeMillis() - start);
      return true;
    } catch (Exception e) {
      LOG.error("Remove HDFS acl error when {} table {}", operation, tableName, e);
      return false;
    }
  }

  /**
   * Remove the default ACL from the namespace archive dir when a namespace is deleted
   * @param namespace the namespace
   * @param removeUsers the users whose default ACLs will be removed
   * @return false if an error occurred, otherwise true
   */
  public boolean removeNamespaceDefaultAcl(String namespace, Set<String> removeUsers) {
    try {
      long start = System.currentTimeMillis();
      Path archiveNsDir = pathHelper.getArchiveNsDir(namespace);
      HDFSAclOperation operation = new HDFSAclOperation(fs, archiveNsDir, removeUsers,
          HDFSAclOperation.OperationType.REMOVE, false, HDFSAclOperation.AclType.DEFAULT);
      operation.handleAcl();
      LOG.info("Remove HDFS acl when delete namespace {}, cost {} ms", namespace,
        System.currentTimeMillis() - start);
      return true;
    } catch (Exception e) {
      LOG.error("Remove HDFS acl error when delete namespace {}", namespace, e);
      return false;
    }
  }

  /**
   * Remove the default ACL from the table archive dir when a table is deleted
   * @param tableName the table name
   * @param removeUsers the users whose default ACLs will be removed
   * @return false if an error occurred, otherwise true
   */
  public boolean removeTableDefaultAcl(TableName tableName, Set<String> removeUsers) {
    try {
      long start = System.currentTimeMillis();
      Path archiveTableDir = pathHelper.getArchiveTableDir(tableName);
      HDFSAclOperation operation = new HDFSAclOperation(fs, archiveTableDir, removeUsers,
          HDFSAclOperation.OperationType.REMOVE, false, HDFSAclOperation.AclType.DEFAULT);
      operation.handleAcl();
      LOG.info("Remove HDFS acl when delete table {}, cost {} ms", tableName,
        System.currentTimeMillis() - start);
      return true;
    } catch (Exception e) {
      LOG.error("Remove HDFS acl error when delete table {}", tableName, e);
      return false;
    }
  }

  /**
   * Add table user ACLs
   * @param tableName the table
   * @param users the table users with READ permission
   * @param operation the operation name, used for logging
   * @return false if an error occurred, otherwise true
   */
  public boolean addTableAcl(TableName tableName, Set<String> users, String operation) {
    try {
      long start = System.currentTimeMillis();
      if (users.size() > 0) {
        HDFSAclOperation.OperationType operationType = HDFSAclOperation.OperationType.MODIFY;
        handleNamespaceAccessAcl(tableName.getNamespaceAsString(), users, operationType);
        handleTableAcl(Sets.newHashSet(tableName), users, new HashSet<>(0), new HashSet<>(0),
          operationType);
      }
      LOG.info("Set HDFS acl when {} table {}, cost {} ms", operation, tableName,
        System.currentTimeMillis() - start);
      return true;
    } catch (Exception e) {
      LOG.error("Set HDFS acl error when {} table {}", operation, tableName, e);
      return false;
    }
  }

  /**
   * Remove table ACLs when a table is modified
   * @param tableName the table
   * @param users the table users with READ permission
   * @return false if an error occurred, otherwise true
   */
  public boolean removeTableAcl(TableName tableName, Set<String> users) {
    try {
      long start = System.currentTimeMillis();
      if (users.size() > 0) {
        handleTableAcl(Sets.newHashSet(tableName), users, new HashSet<>(0), new HashSet<>(0),
          HDFSAclOperation.OperationType.REMOVE);
      }
      LOG.info("Remove HDFS acl when modify table {}, cost {} ms", tableName,
        System.currentTimeMillis() - start);
      return true;
    } catch (Exception e) {
      LOG.error("Remove HDFS acl error when modify table {}", tableName, e);
      return false;
    }
  }

  private void handleGrantOrRevokeAcl(UserPermission userPermission,
      HDFSAclOperation.OperationType operationType, Set<String> skipNamespaces,
      Set<TableName> skipTables) throws ExecutionException, InterruptedException, IOException {
    Set<String> users = Sets.newHashSet(userPermission.getUser());
    switch (userPermission.getAccessScope()) {
      case GLOBAL:
        handleGlobalAcl(users, skipNamespaces, skipTables, operationType);
        break;
      case NAMESPACE:
        NamespacePermission namespacePermission =
            (NamespacePermission) userPermission.getPermission();
        handleNamespaceAcl(Sets.newHashSet(namespacePermission.getNamespace()), users,
          skipNamespaces, skipTables, operationType);
        break;
      case TABLE:
        TablePermission tablePermission = (TablePermission) userPermission.getPermission();
        handleNamespaceAccessAcl(tablePermission.getNamespace(), users, operationType);
        handleTableAcl(Sets.newHashSet(tablePermission.getTableName()), users, skipNamespaces,
          skipTables, operationType);
        break;
      default:
        throw new IllegalArgumentException(
            "Illegal user permission scope " + userPermission.getAccessScope());
    }
  }

  private void handleGlobalAcl(Set<String> users, Set<String> skipNamespaces,
      Set<TableName> skipTables, HDFSAclOperation.OperationType operationType)
      throws ExecutionException, InterruptedException, IOException {
    // handle global root directories HDFS acls
    List<HDFSAclOperation> hdfsAclOperations = getGlobalRootPaths().stream()
        .map(path -> new HDFSAclOperation(fs, path, users, operationType, false,
            HDFSAclOperation.AclType.DEFAULT_AND_ACCESS))
        .collect(Collectors.toList());
    handleHDFSAclParallel(hdfsAclOperations).get();
    // handle namespace HDFS acls
    handleNamespaceAcl(Sets.newHashSet(admin.listNamespaces()), users, skipNamespaces, skipTables,
      operationType);
  }

  private void handleNamespaceAcl(Set<String> namespaces, Set<String> users,
      Set<String> skipNamespaces, Set<TableName> skipTables,
      HDFSAclOperation.OperationType operationType)
      throws ExecutionException, InterruptedException, IOException {
    namespaces.removeAll(skipNamespaces);
    namespaces.remove(NamespaceDescriptor.SYSTEM_NAMESPACE_NAME_STR);
    // handle namespace root directories HDFS acls
    List<HDFSAclOperation> hdfsAclOperations = new ArrayList<>();
    Set<String> skipTableNamespaces =
        skipTables.stream().map(TableName::getNamespaceAsString).collect(Collectors.toSet());
    for (String ns : namespaces) {
      /**
       * When op is REMOVE, remove the DEFAULT namespace ACL while keeping the ACCESS ACL for
       * skipTables; otherwise remove both the DEFAULT and ACCESS ACLs. When op is MODIFY, just
       * operate on the DEFAULT and ACCESS ACLs.
       */
      HDFSAclOperation.OperationType op = operationType;
      HDFSAclOperation.AclType aclType = HDFSAclOperation.AclType.DEFAULT_AND_ACCESS;
      if (operationType == HDFSAclOperation.OperationType.REMOVE
          && skipTableNamespaces.contains(ns)) {
        // remove namespace directories default HDFS acls for skip tables
        op = HDFSAclOperation.OperationType.REMOVE;
        aclType = HDFSAclOperation.AclType.DEFAULT;
      }
      for (Path path : getNamespaceRootPaths(ns)) {
        hdfsAclOperations.add(new HDFSAclOperation(fs, path, users, op, false, aclType));
      }
    }
    handleHDFSAclParallel(hdfsAclOperations).get();
    // handle table directories HDFS acls
    Set<TableName> tables = new HashSet<>();
    for (String namespace : namespaces) {
      tables.addAll(admin.listTableDescriptorsByNamespace(Bytes.toBytes(namespace)).stream()
          .filter(this::isAclSyncToHdfsEnabled).map(TableDescriptor::getTableName)
          .collect(Collectors.toSet()));
    }
    handleTableAcl(tables, users, skipNamespaces, skipTables, operationType);
  }

  private void handleTableAcl(Set<TableName> tableNames, Set<String> users,
      Set<String> skipNamespaces, Set<TableName> skipTables,
      HDFSAclOperation.OperationType operationType)
      throws ExecutionException, InterruptedException, IOException {
    Set<TableName> filterTableNames = new HashSet<>();
    for (TableName tableName : tableNames) {
      if (!skipTables.contains(tableName)
          && !skipNamespaces.contains(tableName.getNamespaceAsString())) {
        filterTableNames.add(tableName);
      }
    }
    List<CompletableFuture<Void>> futures = new ArrayList<>();
    // handle table HDFS acls
    for (TableName tableName : filterTableNames) {
      List<HDFSAclOperation> hdfsAclOperations = getTableRootPaths(tableName, true).stream()
          .map(path -> new HDFSAclOperation(fs, path, users, operationType, true,
              HDFSAclOperation.AclType.DEFAULT_AND_ACCESS))
          .collect(Collectors.toList());
      CompletableFuture<Void> future = handleHDFSAclSequential(hdfsAclOperations);
      futures.add(future);
    }
    CompletableFuture<Void> future =
        CompletableFuture.allOf(futures.toArray(new CompletableFuture[futures.size()]));
    future.get();
  }

  private void handleNamespaceAccessAcl(String namespace, Set<String> users,
      HDFSAclOperation.OperationType operationType)
      throws ExecutionException, InterruptedException {
    // handle namespace access HDFS acls
    List<HDFSAclOperation> hdfsAclOperations =
        getNamespaceRootPaths(namespace).stream().map(path -> new HDFSAclOperation(fs, path, users,
            operationType, false, HDFSAclOperation.AclType.ACCESS)).collect(Collectors.toList());
    CompletableFuture<Void> future = handleHDFSAclParallel(hdfsAclOperations);
    future.get();
  }

  void createTableDirectories(TableName tableName) throws IOException {
    List<Path> paths = getTableRootPaths(tableName, false);
    for (Path path : paths) {
      createDirIfNotExist(path);
    }
  }

  /**
   * Return the paths that a user with global permission will visit
   * @return the path list
   */
  List<Path> getGlobalRootPaths() {
    return Lists.newArrayList(pathHelper.getTmpDataDir(), pathHelper.getDataDir(),
      pathHelper.getMobDataDir(), pathHelper.getArchiveDataDir(), pathHelper.getSnapshotRootDir());
  }

  /**
   * Return the paths that a user with namespace permission will visit
   * @param namespace the namespace
   * @return the path list
   */
  List<Path> getNamespaceRootPaths(String namespace) {
    return Lists.newArrayList(pathHelper.getTmpNsDir(namespace), pathHelper.getDataNsDir(namespace),
      pathHelper.getMobDataNsDir(namespace), pathHelper.getArchiveNsDir(namespace));
  }

  /**
   * Return the paths that a user with table permission will visit
   * @param tableName the table
   * @param includeSnapshotPath true to include the table's snapshot paths, otherwise false
   * @return the path list
   * @throws IOException if an error occurred
   */
  List<Path> getTableRootPaths(TableName tableName, boolean includeSnapshotPath)
      throws IOException {
    List<Path> paths = Lists.newArrayList(pathHelper.getTmpTableDir(tableName),
      pathHelper.getDataTableDir(tableName), pathHelper.getMobTableDir(tableName),
      pathHelper.getArchiveTableDir(tableName));
    if (includeSnapshotPath) {
      paths.addAll(getTableSnapshotPaths(tableName));
    }
    return paths;
  }

  private List<Path> getTableSnapshotPaths(TableName tableName) throws IOException {
    return admin.listSnapshots().stream()
        .filter(snapDesc -> snapDesc.getTableName().equals(tableName))
        .map(snapshotDescription -> pathHelper.getSnapshotDir(snapshotDescription.getName()))
        .collect(Collectors.toList());
  }

  /**
   * Return users with global read permission
   * @return users with global read permission
   * @throws IOException if an error occurred
   */
  private Set<String> getUsersWithGlobalReadAction() throws IOException {
    return getUsersWithReadAction(PermissionStorage.getGlobalPermissions(conf));
  }

  /**
   * Return users with namespace read permission
   * @param namespace the namespace
   * @param includeGlobal true to include users with global read permission
   * @return users with namespace read permission
   * @throws IOException if an error occurred
   */
  Set<String> getUsersWithNamespaceReadAction(String namespace, boolean includeGlobal)
      throws IOException {
    Set<String> users =
        getUsersWithReadAction(PermissionStorage.getNamespacePermissions(conf, namespace));
    if (includeGlobal) {
      users.addAll(getUsersWithGlobalReadAction());
    }
    return users;
  }

  /**
   * Return users with table read permission
   * @param tableName the table
   * @param includeNamespace true to include users with namespace read permission
   * @param includeGlobal true to include users with global read permission
   * @return users with table read permission
   * @throws IOException if an error occurred
   */
  Set<String> getUsersWithTableReadAction(TableName tableName, boolean includeNamespace,
      boolean includeGlobal) throws IOException {
    Set<String> users =
        getUsersWithReadAction(PermissionStorage.getTablePermissions(conf, tableName));
    if (includeNamespace) {
      users.addAll(
          getUsersWithNamespaceReadAction(tableName.getNamespaceAsString(), includeGlobal));
    }
    return users;
  }

  private Set<String> getUsersWithReadAction(
      ListMultimap<String, UserPermission> permissionMultimap) {
    return permissionMultimap.entries().stream()
        .filter(entry -> checkUserPermission(entry.getValue())).map(Map.Entry::getKey)
        .collect(Collectors.toSet());
  }

  private boolean checkUserPermission(UserPermission userPermission) {
    boolean result = containReadAction(userPermission);
    if (result && userPermission.getPermission() instanceof TablePermission) {
      result = isNotFamilyOrQualifierPermission((TablePermission) userPermission.getPermission());
    }
    return result;
  }

  boolean containReadAction(UserPermission userPermission) {
    return userPermission.getPermission().implies(Permission.Action.READ);
  }

  boolean isNotFamilyOrQualifierPermission(TablePermission tablePermission) {
    return !tablePermission.hasFamily() && !tablePermission.hasQualifier();
  }

  public static boolean isAclSyncToHdfsEnabled(Configuration conf) {
    String[] masterCoprocessors = conf.getStrings(CoprocessorHost.MASTER_COPROCESSOR_CONF_KEY);
    Set<String> masterCoprocessorSet = new HashSet<>();
    if (masterCoprocessors != null) {
      Collections.addAll(masterCoprocessorSet, masterCoprocessors);
    }
    return conf.getBoolean(SnapshotScannerHDFSAclHelper.ACL_SYNC_TO_HDFS_ENABLE, false)
        && masterCoprocessorSet.contains(SnapshotScannerHDFSAclController.class.getName())
        && masterCoprocessorSet.contains(AccessController.class.getName());
  }

  boolean isAclSyncToHdfsEnabled(TableDescriptor tableDescriptor) {
    return tableDescriptor != null
        && Boolean.parseBoolean(tableDescriptor.getValue(ACL_SYNC_TO_HDFS_ENABLE));
  }

  PathHelper getPathHelper() {
    return pathHelper;
  }
 
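  /**
   * Asynchronously applies {@code acl} to its path and then, for recursive directory operations,
   * fans out over the child operations in parallel on the helper's pool. A missing path is
   * skipped silently; other IO errors are logged and swallowed so that one bad path does not
   * fail the whole batch.
   */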
  private CompletableFuture<Void> handleHDFSAcl(HDFSAclOperation acl) {
    return CompletableFuture.supplyAsync(() -> {
      List<HDFSAclOperation> childAclOperations = new ArrayList<>();
      try {
        acl.handleAcl();
        childAclOperations = acl.getChildAclOperations();
      } catch (FileNotFoundException e) {
        // Skip handling the acl if the file is not found
      } catch (IOException e) {
        LOG.error("Set HDFS acl error for path {}", acl.path, e);
      }
      return childAclOperations;
    }, pool).thenComposeAsync(this::handleHDFSAclParallel, pool);
  }

  private CompletableFuture<Void> handleHDFSAclSequential(List<HDFSAclOperation> operations) {
    return CompletableFuture.supplyAsync(() -> {
      try {
        for (HDFSAclOperation hdfsAclOperation : operations) {
          handleHDFSAcl(hdfsAclOperation).get();
        }
      } catch (InterruptedException | ExecutionException e) {
        LOG.error("Set HDFS acl error", e);
      }
      return null;
    }, pool);
  }

  private CompletableFuture<Void> handleHDFSAclParallel(List<HDFSAclOperation> operations) {
    List<CompletableFuture<Void>> futures =
        operations.stream().map(this::handleHDFSAcl).collect(Collectors.toList());
    return CompletableFuture.allOf(futures.toArray(new CompletableFuture[futures.size()]));
  }
 
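  /**
   * Build a READ_EXECUTE ACL entry for the given principal. Names recognized by
   * {@link AuthUtil#isGroupPrincipal(String)} (HBase's {@code @group} convention) become GROUP
   * entries, everything else a USER entry. For illustration, a DEFAULT-scope entry for user
   * {@code alice} corresponds to the HDFS ACL spec {@code default:user:alice:r-x}.
   */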
  private static AclEntry aclEntry(AclEntryScope scope, String name) {
    return new AclEntry.Builder().setScope(scope)
        .setType(AuthUtil.isGroupPrincipal(name) ? GROUP : USER).setName(name)
        .setPermission(READ_EXECUTE).build();
  }

  void createDirIfNotExist(Path path) throws IOException {
    if (!fs.exists(path)) {
      fs.mkdirs(path);
    }
  }

  void deleteEmptyDir(Path path) throws IOException {
    if (fs.exists(path) && fs.listStatus(path).length == 0) {
      fs.delete(path, false);
    }
  }

  /**
   * Inner class used to describe whether to modify or remove ACL entries, and of which type
   * (ACCESS, DEFAULT, DEFAULT_AND_ACCESS), for files or directories (and their child files).
   */
  private static class HDFSAclOperation {
    enum OperationType {
      MODIFY, REMOVE
    }

    enum AclType {
      ACCESS, DEFAULT, DEFAULT_AND_ACCESS
    }

    private interface Operation {
      void apply(FileSystem fs, Path path, List<AclEntry> aclList) throws IOException;
    }

    private FileSystem fs;
    private Path path;
    private Operation operation;
    private boolean recursive;
    private AclType aclType;
    private List<AclEntry> defaultAndAccessAclEntries;
    private List<AclEntry> accessAclEntries;
    private List<AclEntry> defaultAclEntries;

    HDFSAclOperation(FileSystem fs, Path path, Set<String> users, OperationType operationType,
        boolean recursive, AclType aclType) {
      this.fs = fs;
      this.path = path;
      this.defaultAndAccessAclEntries = getAclEntries(AclType.DEFAULT_AND_ACCESS, users);
      this.accessAclEntries = getAclEntries(AclType.ACCESS, users);
      this.defaultAclEntries = getAclEntries(AclType.DEFAULT, users);
      if (operationType == OperationType.MODIFY) {
        operation = FileSystem::modifyAclEntries;
      } else if (operationType == OperationType.REMOVE) {
        operation = FileSystem::removeAclEntries;
      } else {
        throw new IllegalArgumentException("Illegal HDFS acl operation type: " + operationType);
      }
      this.recursive = recursive;
      this.aclType = aclType;
    }

    HDFSAclOperation(Path path, HDFSAclOperation parent) {
      this.fs = parent.fs;
      this.path = path;
      this.defaultAndAccessAclEntries = parent.defaultAndAccessAclEntries;
      this.accessAclEntries = parent.accessAclEntries;
      this.defaultAclEntries = parent.defaultAclEntries;
      this.operation = parent.operation;
      this.recursive = parent.recursive;
      this.aclType = parent.aclType;
    }

    List<HDFSAclOperation> getChildAclOperations() throws IOException {
      List<HDFSAclOperation> hdfsAclOperations = new ArrayList<>();
      if (recursive && fs.isDirectory(path)) {
        FileStatus[] fileStatuses = fs.listStatus(path);
        for (FileStatus fileStatus : fileStatuses) {
          hdfsAclOperations.add(new HDFSAclOperation(fileStatus.getPath(), this));
        }
      }
      return hdfsAclOperations;
    }
 
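    /**
     * Apply the configured ACL change to this path if it exists. Directories get the entries for
     * {@code aclType}; plain files only ever get ACCESS entries, because in HDFS a DEFAULT ACL is
     * meaningful only on directories.
     */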
    void handleAcl() throws IOException {
      if (fs.exists(path)) {
        if (fs.isDirectory(path)) {
          switch (aclType) {
            case ACCESS:
              operation.apply(fs, path, accessAclEntries);
              break;
            case DEFAULT:
              operation.apply(fs, path, defaultAclEntries);
              break;
            case DEFAULT_AND_ACCESS:
              operation.apply(fs, path, defaultAndAccessAclEntries);
              break;
            default:
              throw new IllegalArgumentException("Illegal HDFS acl type: " + aclType);
          }
        } else {
          operation.apply(fs, path, accessAclEntries);
        }
      }
    }

    private List<AclEntry> getAclEntries(AclType aclType, Set<String> users) {
      List<AclEntry> aclEntries = new ArrayList<>();
      switch (aclType) {
        case ACCESS:
          for (String user : users) {
            aclEntries.add(aclEntry(ACCESS, user));
          }
          break;
        case DEFAULT:
          for (String user : users) {
            aclEntries.add(aclEntry(DEFAULT, user));
          }
          break;
        case DEFAULT_AND_ACCESS:
          for (String user : users) {
            aclEntries.add(aclEntry(ACCESS, user));
            aclEntries.add(aclEntry(DEFAULT, user));
          }
          break;
        default:
          throw new IllegalArgumentException("Illegal HDFS acl type: " + aclType);
      }
      return aclEntries;
    }
  }
 
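  /**
   * Resolves the directory layout that the ACL operations touch. For illustration (the concrete
   * names come from HConstants/MobUtils): with a root dir of {@code /hbase}, this yields data
   * {@code /hbase/data}, tmp data {@code /hbase/.tmp/data}, mob data {@code /hbase/mobdir/data},
   * archive data {@code /hbase/archive/data} and snapshots {@code /hbase/.hbase-snapshot}.
   */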
  static final class PathHelper {
    Configuration conf;
    Path rootDir;
    Path tmpDataDir;
    Path dataDir;
    Path mobDataDir;
    Path archiveDataDir;
    Path snapshotDir;

    PathHelper(Configuration conf) {
      this.conf = conf;
      rootDir = new Path(conf.get(HConstants.HBASE_DIR));
      tmpDataDir = new Path(new Path(rootDir, HConstants.HBASE_TEMP_DIRECTORY),
          HConstants.BASE_NAMESPACE_DIR);
      dataDir = new Path(rootDir, HConstants.BASE_NAMESPACE_DIR);
      mobDataDir = new Path(MobUtils.getMobHome(rootDir), HConstants.BASE_NAMESPACE_DIR);
      archiveDataDir = new Path(new Path(rootDir, HConstants.HFILE_ARCHIVE_DIRECTORY),
          HConstants.BASE_NAMESPACE_DIR);
      snapshotDir = new Path(rootDir, HConstants.SNAPSHOT_DIR_NAME);
    }

    Path getRootDir() {
      return rootDir;
    }

    Path getDataDir() {
      return dataDir;
    }

    Path getMobDir() {
      return mobDataDir.getParent();
    }

    Path getMobDataDir() {
      return mobDataDir;
    }

    Path getTmpDir() {
      return new Path(rootDir, HConstants.HBASE_TEMP_DIRECTORY);
    }

    Path getTmpDataDir() {
      return tmpDataDir;
    }

    Path getArchiveDir() {
      return new Path(rootDir, HConstants.HFILE_ARCHIVE_DIRECTORY);
    }

    Path getArchiveDataDir() {
      return archiveDataDir;
    }

    Path getDataNsDir(String namespace) {
      return new Path(dataDir, namespace);
    }

    Path getMobDataNsDir(String namespace) {
      return new Path(mobDataDir, namespace);
    }

    Path getDataTableDir(TableName tableName) {
      return new Path(getDataNsDir(tableName.getNamespaceAsString()),
          tableName.getQualifierAsString());
    }

    Path getMobTableDir(TableName tableName) {
      return new Path(getMobDataNsDir(tableName.getNamespaceAsString()),
          tableName.getQualifierAsString());
    }

    Path getArchiveNsDir(String namespace) {
      return new Path(archiveDataDir, namespace);
    }

    Path getArchiveTableDir(TableName tableName) {
      return new Path(getArchiveNsDir(tableName.getNamespaceAsString()),
          tableName.getQualifierAsString());
    }

    Path getTmpNsDir(String namespace) {
      return new Path(tmpDataDir, namespace);
    }

    Path getTmpTableDir(TableName tableName) {
      return new Path(getTmpNsDir(tableName.getNamespaceAsString()),
          tableName.getQualifierAsString());
    }

    Path getSnapshotRootDir() {
      return snapshotDir;
    }

    Path getSnapshotDir(String snapshot) {
      return new Path(snapshotDir, snapshot);
    }

    FileSystem getFileSystem() throws IOException {
      return rootDir.getFileSystem(conf);
    }
  }
}