/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.security.access;

import static org.apache.hadoop.fs.permission.AclEntryScope.ACCESS;
import static org.apache.hadoop.fs.permission.AclEntryScope.DEFAULT;
import static org.apache.hadoop.fs.permission.AclEntryType.GROUP;
import static org.apache.hadoop.fs.permission.AclEntryType.USER;
import static org.apache.hadoop.fs.permission.FsAction.READ_EXECUTE;

import java.io.Closeable;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Collections;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.stream.Collectors;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.AclEntry;
import org.apache.hadoop.fs.permission.AclEntryScope;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.hbase.AuthUtil;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.NamespaceDescriptor;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.SnapshotDescription;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.coprocessor.CoprocessorHost;
import org.apache.hadoop.hbase.mob.MobUtils;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
import org.apache.yetus.audience.InterfaceAudience;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import org.apache.hbase.thirdparty.com.google.common.collect.ListMultimap;
import org.apache.hbase.thirdparty.com.google.common.collect.Lists;
import org.apache.hbase.thirdparty.com.google.common.collect.Sets;
import org.apache.hbase.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder;

/**
 * A helper to modify or remove the default and access HDFS ACLs that HBase grants users over
 * hFiles.
 */
@InterfaceAudience.Private
public class SnapshotScannerHDFSAclHelper implements Closeable {
  private static final Logger LOG = LoggerFactory.getLogger(SnapshotScannerHDFSAclHelper.class);

  public static final String ACL_SYNC_TO_HDFS_ENABLE = "hbase.acl.sync.to.hdfs.enable";
  public static final String ACL_SYNC_TO_HDFS_THREAD_NUMBER =
    "hbase.acl.sync.to.hdfs.thread.number";
  // The tmp directory used to restore snapshots; it must not be a subdirectory of the HBase
  // root dir
  public static final String SNAPSHOT_RESTORE_TMP_DIR = "hbase.snapshot.restore.tmp.dir";
  public static final String SNAPSHOT_RESTORE_TMP_DIR_DEFAULT =
    "/hbase/.tmpdir-to-restore-snapshot";
  // The default permission of the common directories if the feature is enabled.
  public static final String COMMON_DIRECTORY_PERMISSION =
    "hbase.acl.sync.to.hdfs.common.directory.permission";
  // The secure HBase permission is 700. 751 means all others have execute access, and the mask
  // is set to read-execute so that the extended access acl entries can take effect. Be cautious
  // when setting this value.
  public static final String COMMON_DIRECTORY_PERMISSION_DEFAULT = "751";
  // The default permission of the snapshot restore directories if the feature is enabled.
  public static final String SNAPSHOT_RESTORE_DIRECTORY_PERMISSION =
    "hbase.acl.sync.to.hdfs.restore.directory.permission";
  // 753 means all others have write-execute access.
  public static final String SNAPSHOT_RESTORE_DIRECTORY_PERMISSION_DEFAULT = "753";

  private Admin admin;
  private final Configuration conf;
  private FileSystem fs;
  private PathHelper pathHelper;
  private ExecutorService pool;

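  /**
   * A minimal lifecycle sketch, assuming an already-open {@link Connection} (the variable names
   * are illustrative only):
   *
   * <pre>
   * try (SnapshotScannerHDFSAclHelper helper =
   *   new SnapshotScannerHDFSAclHelper(conf, connection)) {
   *   helper.setCommonDirectoryPermission();
   * }
   * </pre>
   */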
  public SnapshotScannerHDFSAclHelper(Configuration configuration, Connection connection)
    throws IOException {
    this.conf = configuration;
    this.pathHelper = new PathHelper(conf);
    this.fs = pathHelper.getFileSystem();
    this.pool = Executors.newFixedThreadPool(conf.getInt(ACL_SYNC_TO_HDFS_THREAD_NUMBER, 10),
      new ThreadFactoryBuilder().setNameFormat("hdfs-acl-thread-%d").setDaemon(true).build());
    this.admin = connection.getAdmin();
  }

  @Override
  public void close() {
    if (pool != null) {
      pool.shutdown();
    }
    admin.close();
  }

  public void setCommonDirectoryPermission() throws IOException {
    // Set the public directory permission to 751 so that all users have access permission.
    // We also need access permission on the parent of the HBase root directory, but it is not
    // set here, because the owner of the HBase root directory may not have permission to change
    // its parent's permission to 751.
    // The {root/.tmp} and {root/.tmp/data} directories are created so that global user HDFS
    // acls can be inherited.
    List<Path> paths = Lists.newArrayList(pathHelper.getRootDir(), pathHelper.getMobDir(),
      pathHelper.getTmpDir(), pathHelper.getArchiveDir());
    paths.addAll(getGlobalRootPaths());
    for (Path path : paths) {
      createDirIfNotExist(path);
      fs.setPermission(path, new FsPermission(
        conf.get(COMMON_DIRECTORY_PERMISSION, COMMON_DIRECTORY_PERMISSION_DEFAULT)));
    }
    // create the snapshot restore directory
    Path restoreDir =
      new Path(conf.get(SNAPSHOT_RESTORE_TMP_DIR, SNAPSHOT_RESTORE_TMP_DIR_DEFAULT));
    createDirIfNotExist(restoreDir);
    fs.setPermission(restoreDir, new FsPermission(conf.get(SNAPSHOT_RESTORE_DIRECTORY_PERMISSION,
      SNAPSHOT_RESTORE_DIRECTORY_PERMISSION_DEFAULT)));
  }

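  /*
   * A hypothetical grant flow, sketched for illustration only; the UserPermission and Permission
   * builder calls below are assumptions about the caller's environment, not part of this class:
   *
   *   UserPermission perm = new UserPermission("bob",
   *     Permission.newBuilder(TableName.valueOf("ns", "table"))
   *       .withActions(Permission.Action.READ).build());
   *   helper.grantAcl(perm, Collections.emptySet(), Collections.emptySet());
   */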
  /**
   * Set ACLs when a user permission is granted
   * @param userPermission the user and permission
   * @param skipNamespaces the namespaces whose ACLs are already set and can be skipped
   * @param skipTables     the tables whose ACLs are already set and can be skipped
   * @return false if an error occurred, otherwise true
   */
  public boolean grantAcl(UserPermission userPermission, Set<String> skipNamespaces,
    Set<TableName> skipTables) {
    try {
      long start = EnvironmentEdgeManager.currentTime();
      handleGrantOrRevokeAcl(userPermission, HDFSAclOperation.OperationType.MODIFY, skipNamespaces,
        skipTables);
      LOG.info("Set HDFS acl when grant {}, cost {} ms", userPermission,
        EnvironmentEdgeManager.currentTime() - start);
      return true;
    } catch (Exception e) {
      LOG.error("Set HDFS acl error when grant: {}", userPermission, e);
      return false;
    }
  }

  /**
   * Remove ACLs when a user permission is granted or revoked
   * @param userPermission the user and permission
   * @param skipNamespaces the namespaces whose ACLs should not be removed
   * @param skipTables     the tables whose ACLs should not be removed
   * @return false if an error occurred, otherwise true
   */
  public boolean revokeAcl(UserPermission userPermission, Set<String> skipNamespaces,
    Set<TableName> skipTables) {
    try {
      long start = EnvironmentEdgeManager.currentTime();
      handleGrantOrRevokeAcl(userPermission, HDFSAclOperation.OperationType.REMOVE, skipNamespaces,
        skipTables);
      LOG.info("Remove HDFS acl when revoke {}, cost {} ms", userPermission,
        EnvironmentEdgeManager.currentTime() - start);
      return true;
    } catch (Exception e) {
      LOG.error("Remove HDFS acl error when revoke: {}", userPermission, e);
      return false;
    }
  }

  /**
   * Set ACLs when taking a snapshot
   * @param snapshot the snapshot description
   * @return false if an error occurred, otherwise true
   */
  public boolean snapshotAcl(SnapshotDescription snapshot) {
    try {
      long start = EnvironmentEdgeManager.currentTime();
      TableName tableName = snapshot.getTableName();
      // global user permissions are inherited from the default acl automatically
      Set<String> userSet = getUsersWithTableReadAction(tableName, true, false);
      if (userSet.size() > 0) {
        Path path = pathHelper.getSnapshotDir(snapshot.getName());
        handleHDFSAcl(new HDFSAclOperation(fs, path, userSet, HDFSAclOperation.OperationType.MODIFY,
          true, HDFSAclOperation.AclType.DEFAULT_AND_ACCESS)).get();
      }
      LOG.info("Set HDFS acl when snapshot {}, cost {} ms", snapshot.getName(),
        EnvironmentEdgeManager.currentTime() - start);
      return true;
    } catch (Exception e) {
      LOG.error("Set HDFS acl error when snapshot {}", snapshot, e);
      return false;
    }
  }

  /**
   * Remove a table's access ACLs from the namespace dir when the table is deleted
   * @param tableName   the table
   * @param removeUsers the users whose access ACLs will be removed
   * @param operation   the operation name used for logging
   * @return false if an error occurred, otherwise true
   */
  public boolean removeNamespaceAccessAcl(TableName tableName, Set<String> removeUsers,
    String operation) {
    try {
      long start = EnvironmentEdgeManager.currentTime();
      if (removeUsers.size() > 0) {
        handleNamespaceAccessAcl(tableName.getNamespaceAsString(), removeUsers,
          HDFSAclOperation.OperationType.REMOVE);
      }
      LOG.info("Remove HDFS acl when {} table {}, cost {} ms", operation, tableName,
        EnvironmentEdgeManager.currentTime() - start);
      return true;
    } catch (Exception e) {
      LOG.error("Remove HDFS acl error when {} table {}", operation, tableName, e);
      return false;
    }
  }

  /**
   * Remove the default ACLs from the namespace archive dir when the namespace is deleted
   * @param namespace   the namespace
   * @param removeUsers the users whose default ACLs will be removed
   * @return false if an error occurred, otherwise true
   */
  public boolean removeNamespaceDefaultAcl(String namespace, Set<String> removeUsers) {
    try {
      long start = EnvironmentEdgeManager.currentTime();
      Path archiveNsDir = pathHelper.getArchiveNsDir(namespace);
      HDFSAclOperation operation = new HDFSAclOperation(fs, archiveNsDir, removeUsers,
        HDFSAclOperation.OperationType.REMOVE, false, HDFSAclOperation.AclType.DEFAULT);
      operation.handleAcl();
      LOG.info("Remove HDFS acl when delete namespace {}, cost {} ms", namespace,
        EnvironmentEdgeManager.currentTime() - start);
      return true;
    } catch (Exception e) {
      LOG.error("Remove HDFS acl error when delete namespace {}", namespace, e);
      return false;
    }
  }

  /**
   * Remove the default ACLs from the table archive dir when the table is deleted
   * @param tableName   the table name
   * @param removeUsers the users whose default ACLs will be removed
   * @return false if an error occurred, otherwise true
   */
  public boolean removeTableDefaultAcl(TableName tableName, Set<String> removeUsers) {
    try {
      long start = EnvironmentEdgeManager.currentTime();
      Path archiveTableDir = pathHelper.getArchiveTableDir(tableName);
      HDFSAclOperation operation = new HDFSAclOperation(fs, archiveTableDir, removeUsers,
        HDFSAclOperation.OperationType.REMOVE, false, HDFSAclOperation.AclType.DEFAULT);
      operation.handleAcl();
      LOG.info("Remove HDFS acl when delete table {}, cost {} ms", tableName,
        EnvironmentEdgeManager.currentTime() - start);
      return true;
    } catch (Exception e) {
      LOG.error("Remove HDFS acl error when delete table {}", tableName, e);
      return false;
    }
  }

  /**
   * Add table user ACLs
   * @param tableName the table
   * @param users     the table users with the READ permission
   * @param operation the operation name used for logging
   * @return false if an error occurred, otherwise true
   */
  public boolean addTableAcl(TableName tableName, Set<String> users, String operation) {
    try {
      long start = EnvironmentEdgeManager.currentTime();
      if (users.size() > 0) {
        HDFSAclOperation.OperationType operationType = HDFSAclOperation.OperationType.MODIFY;
        handleNamespaceAccessAcl(tableName.getNamespaceAsString(), users, operationType);
        handleTableAcl(Sets.newHashSet(tableName), users, new HashSet<>(0), new HashSet<>(0),
          operationType);
      }
      LOG.info("Set HDFS acl when {} table {}, cost {} ms", operation, tableName,
        EnvironmentEdgeManager.currentTime() - start);
      return true;
    } catch (Exception e) {
      LOG.error("Set HDFS acl error when {} table {}", operation, tableName, e);
      return false;
    }
  }

  /**
   * Remove table ACLs when the table is modified
   * @param tableName the table
   * @param users     the table users with the READ permission
   * @return false if an error occurred, otherwise true
   */
  public boolean removeTableAcl(TableName tableName, Set<String> users) {
    try {
      long start = EnvironmentEdgeManager.currentTime();
      if (users.size() > 0) {
        handleTableAcl(Sets.newHashSet(tableName), users, new HashSet<>(0), new HashSet<>(0),
          HDFSAclOperation.OperationType.REMOVE);
      }
      LOG.info("Remove HDFS acl when modify table {}, cost {} ms", tableName,
        EnvironmentEdgeManager.currentTime() - start);
      return true;
    } catch (Exception e) {
      LOG.error("Remove HDFS acl error when modify table {}", tableName, e);
      return false;
    }
  }

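  // Dispatch on the permission scope: a GLOBAL permission touches every namespace and table, a
  // NAMESPACE permission touches the namespace directories and the tables below them, and a
  // TABLE permission touches the namespace access acls plus the table directories.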
  private void handleGrantOrRevokeAcl(UserPermission userPermission,
    HDFSAclOperation.OperationType operationType, Set<String> skipNamespaces,
    Set<TableName> skipTables) throws ExecutionException, InterruptedException, IOException {
    Set<String> users = Sets.newHashSet(userPermission.getUser());
    switch (userPermission.getAccessScope()) {
      case GLOBAL:
        handleGlobalAcl(users, skipNamespaces, skipTables, operationType);
        break;
      case NAMESPACE:
        NamespacePermission namespacePermission =
          (NamespacePermission) userPermission.getPermission();
        handleNamespaceAcl(Sets.newHashSet(namespacePermission.getNamespace()), users,
          skipNamespaces, skipTables, operationType);
        break;
      case TABLE:
        TablePermission tablePermission = (TablePermission) userPermission.getPermission();
        handleNamespaceAccessAcl(tablePermission.getNamespace(), users, operationType);
        handleTableAcl(Sets.newHashSet(tablePermission.getTableName()), users, skipNamespaces,
          skipTables, operationType);
        break;
      default:
        throw new IllegalArgumentException(
          "Illegal user permission scope " + userPermission.getAccessScope());
    }
  }

  private void handleGlobalAcl(Set<String> users, Set<String> skipNamespaces,
    Set<TableName> skipTables, HDFSAclOperation.OperationType operationType)
    throws ExecutionException, InterruptedException, IOException {
    // handle HDFS acls of the global root directories
    List<HDFSAclOperation> hdfsAclOperations =
      getGlobalRootPaths().stream().map(path -> new HDFSAclOperation(fs, path, users, operationType,
        false, HDFSAclOperation.AclType.DEFAULT_AND_ACCESS)).collect(Collectors.toList());
    handleHDFSAclParallel(hdfsAclOperations).get();
    // handle namespace HDFS acls
    handleNamespaceAcl(Sets.newHashSet(admin.listNamespaces()), users, skipNamespaces, skipTables,
      operationType);
  }

  private void handleNamespaceAcl(Set<String> namespaces, Set<String> users,
    Set<String> skipNamespaces, Set<TableName> skipTables,
    HDFSAclOperation.OperationType operationType)
    throws ExecutionException, InterruptedException, IOException {
    namespaces.removeAll(skipNamespaces);
    namespaces.remove(NamespaceDescriptor.SYSTEM_NAMESPACE_NAME_STR);
    // handle HDFS acls of the namespace root directories
    List<HDFSAclOperation> hdfsAclOperations = new ArrayList<>();
    Set<String> skipTableNamespaces =
      skipTables.stream().map(TableName::getNamespaceAsString).collect(Collectors.toSet());
    for (String ns : namespaces) {
      /**
       * When op is REMOVE, remove only the DEFAULT namespace ACL while keeping the ACCESS ACL
       * for skipTables; otherwise remove both the DEFAULT and ACCESS ACLs. When op is MODIFY,
       * operate on both the DEFAULT and ACCESS ACLs.
       */
      HDFSAclOperation.OperationType op = operationType;
      HDFSAclOperation.AclType aclType = HDFSAclOperation.AclType.DEFAULT_AND_ACCESS;
      if (
        operationType == HDFSAclOperation.OperationType.REMOVE && skipTableNamespaces.contains(ns)
      ) {
        // remove only the default HDFS acls on the namespace directories for skip tables
        op = HDFSAclOperation.OperationType.REMOVE;
        aclType = HDFSAclOperation.AclType.DEFAULT;
      }
      for (Path path : getNamespaceRootPaths(ns)) {
        hdfsAclOperations.add(new HDFSAclOperation(fs, path, users, op, false, aclType));
      }
    }
    handleHDFSAclParallel(hdfsAclOperations).get();
    // handle HDFS acls of the table directories
    Set<TableName> tables = new HashSet<>();
    for (String namespace : namespaces) {
      tables.addAll(admin.listTableDescriptorsByNamespace(Bytes.toBytes(namespace)).stream()
        .filter(this::isAclSyncToHdfsEnabled).map(TableDescriptor::getTableName)
        .collect(Collectors.toSet()));
    }
    handleTableAcl(tables, users, skipNamespaces, skipTables, operationType);
  }

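  // Tables in skipTables, or whose namespace is in skipNamespaces, are filtered out because the
  // caller has already set (or intentionally keeps) their acls.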
  private void handleTableAcl(Set<TableName> tableNames, Set<String> users,
    Set<String> skipNamespaces, Set<TableName> skipTables,
    HDFSAclOperation.OperationType operationType)
    throws ExecutionException, InterruptedException, IOException {
    Set<TableName> filterTableNames = new HashSet<>();
    for (TableName tableName : tableNames) {
      if (
        !skipTables.contains(tableName)
          && !skipNamespaces.contains(tableName.getNamespaceAsString())
      ) {
        filterTableNames.add(tableName);
      }
    }
    List<CompletableFuture<Void>> futures = new ArrayList<>();
    // handle table HDFS acls
    for (TableName tableName : filterTableNames) {
      List<HDFSAclOperation> hdfsAclOperations = getTableRootPaths(tableName, true).stream()
        .map(path -> new HDFSAclOperation(fs, path, users, operationType, true,
          HDFSAclOperation.AclType.DEFAULT_AND_ACCESS))
        .collect(Collectors.toList());
      CompletableFuture<Void> future = handleHDFSAclSequential(hdfsAclOperations);
      futures.add(future);
    }
    CompletableFuture<Void> future =
      CompletableFuture.allOf(futures.toArray(new CompletableFuture[futures.size()]));
    future.get();
  }

  private void handleNamespaceAccessAcl(String namespace, Set<String> users,
    HDFSAclOperation.OperationType operationType) throws ExecutionException, InterruptedException {
    // handle namespace access HDFS acls
    List<HDFSAclOperation> hdfsAclOperations =
      getNamespaceRootPaths(namespace).stream().map(path -> new HDFSAclOperation(fs, path, users,
        operationType, false, HDFSAclOperation.AclType.ACCESS)).collect(Collectors.toList());
    CompletableFuture<Void> future = handleHDFSAclParallel(hdfsAclOperations);
    future.get();
  }

  void createTableDirectories(TableName tableName) throws IOException {
    List<Path> paths = getTableRootPaths(tableName, false);
    for (Path path : paths) {
      createDirIfNotExist(path);
    }
  }

  /**
   * Return the paths that a user with global permission will visit
   * @return the path list
   */
  List<Path> getGlobalRootPaths() {
    return Lists.newArrayList(pathHelper.getTmpDataDir(), pathHelper.getDataDir(),
      pathHelper.getMobDataDir(), pathHelper.getArchiveDataDir(), pathHelper.getSnapshotRootDir());
  }

  /**
   * Return the paths that a user with namespace permission will visit
   * @param namespace the namespace
   * @return the path list
   */
  List<Path> getNamespaceRootPaths(String namespace) {
    return Lists.newArrayList(pathHelper.getTmpNsDir(namespace), pathHelper.getDataNsDir(namespace),
      pathHelper.getMobDataNsDir(namespace), pathHelper.getArchiveNsDir(namespace));
  }

  /**
   * Return the paths that a user with table permission will visit
   * @param tableName           the table
   * @param includeSnapshotPath true to also return the table snapshot paths, otherwise false
   * @return the path list
   * @throws IOException if an error occurred
   */
  List<Path> getTableRootPaths(TableName tableName, boolean includeSnapshotPath)
    throws IOException {
    List<Path> paths = Lists.newArrayList(pathHelper.getDataTableDir(tableName),
      pathHelper.getMobTableDir(tableName), pathHelper.getArchiveTableDir(tableName));
    if (includeSnapshotPath) {
      paths.addAll(getTableSnapshotPaths(tableName));
    }
    return paths;
  }

  private List<Path> getTableSnapshotPaths(TableName tableName) throws IOException {
    return admin.listSnapshots().stream()
      .filter(snapDesc -> snapDesc.getTableName().equals(tableName))
      .map(snapshotDescription -> pathHelper.getSnapshotDir(snapshotDescription.getName()))
      .collect(Collectors.toList());
  }

  /**
   * Return users with global read permission
   * @return users with global read permission
   * @throws IOException if an error occurred
   */
  private Set<String> getUsersWithGlobalReadAction() throws IOException {
    return getUsersWithReadAction(PermissionStorage.getGlobalPermissions(conf));
  }

  /**
   * Return users with namespace read permission
   * @param namespace     the namespace
   * @param includeGlobal true to also include users with the global read action
   * @return users with namespace read permission
   * @throws IOException if an error occurred
   */
  Set<String> getUsersWithNamespaceReadAction(String namespace, boolean includeGlobal)
    throws IOException {
    Set<String> users =
      getUsersWithReadAction(PermissionStorage.getNamespacePermissions(conf, namespace));
    if (includeGlobal) {
      users.addAll(getUsersWithGlobalReadAction());
    }
    return users;
  }

  /**
   * Return users with table read permission
   * @param tableName        the table
   * @param includeNamespace true to also include users with the namespace read action
   * @param includeGlobal    true to also include users with the global read action
   * @return users with table read permission
   * @throws IOException if an error occurred
   */
  Set<String> getUsersWithTableReadAction(TableName tableName, boolean includeNamespace,
    boolean includeGlobal) throws IOException {
    Set<String> users =
      getUsersWithReadAction(PermissionStorage.getTablePermissions(conf, tableName));
    if (includeNamespace) {
      users
        .addAll(getUsersWithNamespaceReadAction(tableName.getNamespaceAsString(), includeGlobal));
    }
    return users;
  }

  private Set<String>
    getUsersWithReadAction(ListMultimap<String, UserPermission> permissionMultimap) {
    return permissionMultimap.entries().stream()
      .filter(entry -> checkUserPermission(entry.getValue())).map(Map.Entry::getKey)
      .collect(Collectors.toSet());
  }

  private boolean checkUserPermission(UserPermission userPermission) {
    boolean result = containReadAction(userPermission);
    if (result && userPermission.getPermission() instanceof TablePermission) {
      result = isNotFamilyOrQualifierPermission((TablePermission) userPermission.getPermission());
    }
    return result;
  }

  boolean containReadAction(UserPermission userPermission) {
    return userPermission.getPermission().implies(Permission.Action.READ);
  }

  boolean isNotFamilyOrQualifierPermission(TablePermission tablePermission) {
    return !tablePermission.hasFamily() && !tablePermission.hasQualifier();
  }

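  /**
   * The feature is considered enabled only when the switch is on and both required master
   * coprocessors are configured. A minimal configuration sketch:
   *
   * <pre>
   * Configuration conf = HBaseConfiguration.create();
   * conf.setBoolean(SnapshotScannerHDFSAclHelper.ACL_SYNC_TO_HDFS_ENABLE, true);
   * conf.set(CoprocessorHost.MASTER_COPROCESSOR_CONF_KEY, AccessController.class.getName() + ","
   *   + SnapshotScannerHDFSAclController.class.getName());
   * </pre>
   */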
  public static boolean isAclSyncToHdfsEnabled(Configuration conf) {
    String[] masterCoprocessors = conf.getStrings(CoprocessorHost.MASTER_COPROCESSOR_CONF_KEY);
    Set<String> masterCoprocessorSet = new HashSet<>();
    if (masterCoprocessors != null) {
      Collections.addAll(masterCoprocessorSet, masterCoprocessors);
    }
    return conf.getBoolean(SnapshotScannerHDFSAclHelper.ACL_SYNC_TO_HDFS_ENABLE, false)
      && masterCoprocessorSet.contains(SnapshotScannerHDFSAclController.class.getName())
      && masterCoprocessorSet.contains(AccessController.class.getName());
  }

  boolean isAclSyncToHdfsEnabled(TableDescriptor tableDescriptor) {
    return tableDescriptor != null
      && Boolean.parseBoolean(tableDescriptor.getValue(ACL_SYNC_TO_HDFS_ENABLE));
  }

  PathHelper getPathHelper() {
    return pathHelper;
  }

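  // Applies the operation to the given path, then fans out into its children: the acl call runs
  // in the pool, and thenComposeAsync schedules one future per child so that sibling subtrees
  // are handled in parallel.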
  private CompletableFuture<Void> handleHDFSAcl(HDFSAclOperation acl) {
    return CompletableFuture.supplyAsync(() -> {
      List<HDFSAclOperation> childAclOperations = new ArrayList<>();
      try {
        acl.handleAcl();
        childAclOperations = acl.getChildAclOperations();
      } catch (FileNotFoundException e) {
        // Skip handling the acl if the file is not found
      } catch (IOException e) {
        LOG.error("Set HDFS acl error for path {}", acl.path, e);
      }
      return childAclOperations;
    }, pool).thenComposeAsync(this::handleHDFSAclParallel, pool);
  }

  private CompletableFuture<Void> handleHDFSAclSequential(List<HDFSAclOperation> operations) {
    return CompletableFuture.supplyAsync(() -> {
      try {
        for (HDFSAclOperation hdfsAclOperation : operations) {
          handleHDFSAcl(hdfsAclOperation).get();
        }
      } catch (InterruptedException | ExecutionException e) {
        LOG.error("Set HDFS acl error", e);
      }
      return null;
    }, pool);
  }

  private CompletableFuture<Void> handleHDFSAclParallel(List<HDFSAclOperation> operations) {
    List<CompletableFuture<Void>> futures =
      operations.stream().map(this::handleHDFSAcl).collect(Collectors.toList());
    return CompletableFuture.allOf(futures.toArray(new CompletableFuture[futures.size()]));
  }

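  /**
   * Builds a READ_EXECUTE acl entry for the given principal; group principals (as recognized by
   * {@link AuthUtil#isGroupPrincipal(String)}) become GROUP entries, everything else becomes a
   * USER entry. For instance, aclEntry(ACCESS, "bob") corresponds to an HDFS entry of the form
   * {@code user:bob:r-x}.
   */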
  private static AclEntry aclEntry(AclEntryScope scope, String name) {
    return new AclEntry.Builder().setScope(scope)
      .setType(AuthUtil.isGroupPrincipal(name) ? GROUP : USER).setName(name)
      .setPermission(READ_EXECUTE).build();
  }

  void createDirIfNotExist(Path path) throws IOException {
    if (!fs.exists(path)) {
      fs.mkdirs(path);
    }
  }

  void deleteEmptyDir(Path path) throws IOException {
    if (fs.exists(path) && fs.listStatus(path).length == 0) {
      fs.delete(path, false);
    }
  }

  /**
   * Inner class describing an operation that modifies or removes ACL entries of a given type
   * (ACCESS, DEFAULT, or DEFAULT_AND_ACCESS) for a file or a directory (and its child files).
   */
  private static class HDFSAclOperation {
    enum OperationType {
      MODIFY,
      REMOVE
    }

    enum AclType {
      ACCESS,
      DEFAULT,
      DEFAULT_AND_ACCESS
    }

    private interface Operation {
      void apply(FileSystem fs, Path path, List<AclEntry> aclList) throws IOException;
    }

    private FileSystem fs;
    private Path path;
    private Operation operation;
    private boolean recursive;
    private AclType aclType;
    private List<AclEntry> defaultAndAccessAclEntries;
    private List<AclEntry> accessAclEntries;
    private List<AclEntry> defaultAclEntries;

    HDFSAclOperation(FileSystem fs, Path path, Set<String> users, OperationType operationType,
      boolean recursive, AclType aclType) {
      this.fs = fs;
      this.path = path;
      this.defaultAndAccessAclEntries = getAclEntries(AclType.DEFAULT_AND_ACCESS, users);
      this.accessAclEntries = getAclEntries(AclType.ACCESS, users);
      this.defaultAclEntries = getAclEntries(AclType.DEFAULT, users);
      if (operationType == OperationType.MODIFY) {
        operation = FileSystem::modifyAclEntries;
      } else if (operationType == OperationType.REMOVE) {
        operation = FileSystem::removeAclEntries;
      } else {
        throw new IllegalArgumentException("Illegal HDFS acl operation type: " + operationType);
      }
      this.recursive = recursive;
      this.aclType = aclType;
    }

    HDFSAclOperation(Path path, HDFSAclOperation parent) {
      this.fs = parent.fs;
      this.path = path;
      this.defaultAndAccessAclEntries = parent.defaultAndAccessAclEntries;
      this.accessAclEntries = parent.accessAclEntries;
      this.defaultAclEntries = parent.defaultAclEntries;
      this.operation = parent.operation;
      this.recursive = parent.recursive;
      this.aclType = parent.aclType;
    }

    List<HDFSAclOperation> getChildAclOperations() throws IOException {
      List<HDFSAclOperation> hdfsAclOperations = new ArrayList<>();
      if (recursive && fs.isDirectory(path)) {
        FileStatus[] fileStatuses = fs.listStatus(path);
        for (FileStatus fileStatus : fileStatuses) {
          hdfsAclOperations.add(new HDFSAclOperation(fileStatus.getPath(), this));
        }
      }
      return hdfsAclOperations;
    }

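    // Directories receive the configured acl type; plain files only ever receive ACCESS entries,
    // since HDFS default acls apply to directories only.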
    void handleAcl() throws IOException {
      if (fs.exists(path)) {
        if (fs.isDirectory(path)) {
          switch (aclType) {
            case ACCESS:
              operation.apply(fs, path, accessAclEntries);
              break;
            case DEFAULT:
              operation.apply(fs, path, defaultAclEntries);
              break;
            case DEFAULT_AND_ACCESS:
              operation.apply(fs, path, defaultAndAccessAclEntries);
              break;
            default:
              throw new IllegalArgumentException("Illegal HDFS acl type: " + aclType);
          }
        } else {
          operation.apply(fs, path, accessAclEntries);
        }
      }
    }

    private List<AclEntry> getAclEntries(AclType aclType, Set<String> users) {
      List<AclEntry> aclEntries = new ArrayList<>();
      switch (aclType) {
        case ACCESS:
          for (String user : users) {
            aclEntries.add(aclEntry(ACCESS, user));
          }
          break;
        case DEFAULT:
          for (String user : users) {
            aclEntries.add(aclEntry(DEFAULT, user));
          }
          break;
        case DEFAULT_AND_ACCESS:
          for (String user : users) {
            aclEntries.add(aclEntry(ACCESS, user));
            aclEntries.add(aclEntry(DEFAULT, user));
          }
          break;
        default:
          throw new IllegalArgumentException("Illegal HDFS acl type: " + aclType);
      }
      return aclEntries;
    }
  }

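  /**
   * Resolves the HBase directory layout under the configured HBase root dir. As a sketch,
   * assuming the default HConstants values and a root of {@code /hbase}: data is
   * {@code /hbase/data}, tmp data is {@code /hbase/.tmp/data}, archive data is
   * {@code /hbase/archive/data}, and snapshots live under {@code /hbase/.hbase-snapshot}.
   */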
  static final class PathHelper {
    Configuration conf;
    Path rootDir;
    Path tmpDataDir;
    Path dataDir;
    Path mobDataDir;
    Path archiveDataDir;
    Path snapshotDir;

    PathHelper(Configuration conf) {
      this.conf = conf;
      rootDir = new Path(conf.get(HConstants.HBASE_DIR));
      tmpDataDir =
        new Path(new Path(rootDir, HConstants.HBASE_TEMP_DIRECTORY), HConstants.BASE_NAMESPACE_DIR);
      dataDir = new Path(rootDir, HConstants.BASE_NAMESPACE_DIR);
      mobDataDir = new Path(MobUtils.getMobHome(rootDir), HConstants.BASE_NAMESPACE_DIR);
      archiveDataDir = new Path(new Path(rootDir, HConstants.HFILE_ARCHIVE_DIRECTORY),
        HConstants.BASE_NAMESPACE_DIR);
      snapshotDir = new Path(rootDir, HConstants.SNAPSHOT_DIR_NAME);
    }

    Path getRootDir() {
      return rootDir;
    }

    Path getDataDir() {
      return dataDir;
    }

    Path getMobDir() {
      return mobDataDir.getParent();
    }

    Path getMobDataDir() {
      return mobDataDir;
    }

    Path getTmpDir() {
      return new Path(rootDir, HConstants.HBASE_TEMP_DIRECTORY);
    }

    Path getTmpDataDir() {
      return tmpDataDir;
    }

    Path getArchiveDir() {
      return new Path(rootDir, HConstants.HFILE_ARCHIVE_DIRECTORY);
    }

    Path getArchiveDataDir() {
      return archiveDataDir;
    }

    Path getDataNsDir(String namespace) {
      return new Path(dataDir, namespace);
    }

    Path getMobDataNsDir(String namespace) {
      return new Path(mobDataDir, namespace);
    }

    Path getDataTableDir(TableName tableName) {
      return new Path(getDataNsDir(tableName.getNamespaceAsString()),
        tableName.getQualifierAsString());
    }

    Path getMobTableDir(TableName tableName) {
      return new Path(getMobDataNsDir(tableName.getNamespaceAsString()),
        tableName.getQualifierAsString());
    }

    Path getArchiveNsDir(String namespace) {
      return new Path(archiveDataDir, namespace);
    }

    Path getArchiveTableDir(TableName tableName) {
      return new Path(getArchiveNsDir(tableName.getNamespaceAsString()),
        tableName.getQualifierAsString());
    }

    Path getTmpNsDir(String namespace) {
      return new Path(tmpDataDir, namespace);
    }

    Path getTmpTableDir(TableName tableName) {
      return new Path(getTmpNsDir(tableName.getNamespaceAsString()),
        tableName.getQualifierAsString());
    }

    Path getSnapshotRootDir() {
      return snapshotDir;
    }

    Path getSnapshotDir(String snapshot) {
      return new Path(snapshotDir, snapshot);
    }

    FileSystem getFileSystem() throws IOException {
      return rootDir.getFileSystem(conf);
    }
  }
}