/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.security.access;

import static org.apache.hadoop.fs.permission.AclEntryScope.ACCESS;
import static org.apache.hadoop.fs.permission.AclEntryScope.DEFAULT;
import static org.apache.hadoop.fs.permission.AclEntryType.GROUP;
import static org.apache.hadoop.fs.permission.AclEntryType.USER;
import static org.apache.hadoop.fs.permission.FsAction.READ_EXECUTE;

import java.io.Closeable;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Collections;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.stream.Collectors;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.AclEntry;
import org.apache.hadoop.fs.permission.AclEntryScope;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.hbase.AuthUtil;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.NamespaceDescriptor;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.SnapshotDescription;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.coprocessor.CoprocessorHost;
import org.apache.hadoop.hbase.mob.MobUtils;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
import org.apache.yetus.audience.InterfaceAudience;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import org.apache.hbase.thirdparty.com.google.common.collect.ListMultimap;
import org.apache.hbase.thirdparty.com.google.common.collect.Lists;
import org.apache.hbase.thirdparty.com.google.common.collect.Sets;
import org.apache.hbase.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder;

/**
 * A helper to set up, modify, or remove HDFS ACLs (both default and access entries) over hFiles
 * for users who have been granted read permission in HBase.
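 * <p>
 * A minimal configuration sketch for enabling this feature (illustrative only; it assumes a
 * deployment where exactly these two master coprocessors are configured):
 * </p>
 * <pre>
 * hbase.acl.sync.to.hdfs.enable = true
 * hbase.coprocessor.master.classes =
 *   org.apache.hadoop.hbase.security.access.AccessController,
 *   org.apache.hadoop.hbase.security.access.SnapshotScannerHDFSAclController
 * </pre>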
 */
@InterfaceAudience.Private
public class SnapshotScannerHDFSAclHelper implements Closeable {
  private static final Logger LOG = LoggerFactory.getLogger(SnapshotScannerHDFSAclHelper.class);

  public static final String ACL_SYNC_TO_HDFS_ENABLE = "hbase.acl.sync.to.hdfs.enable";
  public static final String ACL_SYNC_TO_HDFS_THREAD_NUMBER =
    "hbase.acl.sync.to.hdfs.thread.number";
  // The tmp directory used to restore snapshots; it must not be a subdirectory of the HBase root
  // directory
  public static final String SNAPSHOT_RESTORE_TMP_DIR = "hbase.snapshot.restore.tmp.dir";
  public static final String SNAPSHOT_RESTORE_TMP_DIR_DEFAULT =
    "/hbase/.tmpdir-to-restore-snapshot";
  // The permission applied to the common directories if the feature is enabled.
  public static final String COMMON_DIRECTORY_PERMISSION =
    "hbase.acl.sync.to.hdfs.common.directory.permission";
  // The secure HBase permission is 700. 751 gives all others execute access, and the mask is set
  // to read-execute so that the extended access ACL entries can take effect. Be cautious when
  // changing this value.
  public static final String COMMON_DIRECTORY_PERMISSION_DEFAULT = "751";
  // The permission applied to the snapshot restore directories if the feature is enabled.
  public static final String SNAPSHOT_RESTORE_DIRECTORY_PERMISSION =
    "hbase.acl.sync.to.hdfs.restore.directory.permission";
  // 753 gives all others write-execute access.
  public static final String SNAPSHOT_RESTORE_DIRECTORY_PERMISSION_DEFAULT = "753";

  private Admin admin;
  private final Configuration conf;
  private FileSystem fs;
  private PathHelper pathHelper;
  private ExecutorService pool;

  public SnapshotScannerHDFSAclHelper(Configuration configuration, Connection connection)
    throws IOException {
    this.conf = configuration;
    this.pathHelper = new PathHelper(conf);
    this.fs = pathHelper.getFileSystem();
    this.pool = Executors.newFixedThreadPool(conf.getInt(ACL_SYNC_TO_HDFS_THREAD_NUMBER, 10),
      new ThreadFactoryBuilder().setNameFormat("hdfs-acl-thread-%d").setDaemon(true).build());
    this.admin = connection.getAdmin();
  }

  @Override
  public void close() {
    if (pool != null) {
      pool.shutdown();
    }
    try {
      admin.close();
    } catch (IOException e) {
      LOG.error("Close admin error", e);
    }
  }

  public void setCommonDirectoryPermission() throws IOException {
    // Set the public directory permission to 751 so that all users have access permission.
    // The access permission of the parent of the HBase root directory is also needed, but it is
    // not set here because the owner of the HBase root directory may not have permission to
    // change its parent's permission to 751.
    // The {root/.tmp} and {root/.tmp/data} directories are created so that global user HDFS
    // ACLs can be inherited.
    List<Path> paths = Lists.newArrayList(pathHelper.getRootDir(), pathHelper.getMobDir(),
      pathHelper.getTmpDir(), pathHelper.getArchiveDir());
    paths.addAll(getGlobalRootPaths());
    for (Path path : paths) {
      createDirIfNotExist(path);
      fs.setPermission(path, new FsPermission(
        conf.get(COMMON_DIRECTORY_PERMISSION, COMMON_DIRECTORY_PERMISSION_DEFAULT)));
    }
    // create snapshot restore directory
    Path restoreDir =
      new Path(conf.get(SNAPSHOT_RESTORE_TMP_DIR, SNAPSHOT_RESTORE_TMP_DIR_DEFAULT));
    createDirIfNotExist(restoreDir);
    fs.setPermission(restoreDir, new FsPermission(conf.get(SNAPSHOT_RESTORE_DIRECTORY_PERMISSION,
      SNAPSHOT_RESTORE_DIRECTORY_PERMISSION_DEFAULT)));
  }

  /**
   * Set ACLs when granting user permission
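   * <p>
   * Illustrative usage (the user, table, and {@code helper} variable are hypothetical), e.g.
   * after a table-level READ grant has been persisted:
   * </p>
   * <pre>
   * UserPermission perm = new UserPermission("bob",
   *   Permission.newBuilder(TableName.valueOf("ns", "table"))
   *     .withActions(Permission.Action.READ).build());
   * helper.grantAcl(perm, Collections.emptySet(), Collections.emptySet());
   * </pre>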
   * @param userPermission the user and permission
   * @param skipNamespaces the namespaces to skip because their ACLs are already set
   * @param skipTables     the tables to skip because their ACLs are already set
   * @return false if an error occurred, otherwise true
   */
  public boolean grantAcl(UserPermission userPermission, Set<String> skipNamespaces,
    Set<TableName> skipTables) {
    try {
      long start = EnvironmentEdgeManager.currentTime();
      handleGrantOrRevokeAcl(userPermission, HDFSAclOperation.OperationType.MODIFY, skipNamespaces,
        skipTables);
      LOG.info("Set HDFS acl when grant {}, skipNamespaces: {}, skipTables: {}, cost {} ms",
        userPermission, skipNamespaces, skipTables, EnvironmentEdgeManager.currentTime() - start);
      return true;
    } catch (Exception e) {
      LOG.error("Set HDFS acl error when grant: {}, skipNamespaces: {}, skipTables: {}",
        userPermission, skipNamespaces, skipTables, e);
      return false;
    }
  }

  /**
   * Remove ACLs when granting or revoking user permission
   * @param userPermission the user and permission
   * @param skipNamespaces the namespaces to skip when removing ACLs
   * @param skipTables     the tables to skip when removing ACLs
   * @return false if an error occurred, otherwise true
   */
  public boolean revokeAcl(UserPermission userPermission, Set<String> skipNamespaces,
    Set<TableName> skipTables) {
    try {
      long start = EnvironmentEdgeManager.currentTime();
      handleGrantOrRevokeAcl(userPermission, HDFSAclOperation.OperationType.REMOVE, skipNamespaces,
        skipTables);
      LOG.info("Set HDFS acl when revoke {}, skipNamespaces: {}, skipTables: {}, cost {} ms",
        userPermission, skipNamespaces, skipTables, EnvironmentEdgeManager.currentTime() - start);
      return true;
    } catch (Exception e) {
      LOG.error("Set HDFS acl error when revoke: {}, skipNamespaces: {}, skipTables: {}",
        userPermission, skipNamespaces, skipTables, e);
      return false;
    }
  }

  /**
   * Set ACLs when taking a snapshot
   * @param snapshot the snapshot description
   * @return false if an error occurred, otherwise true
   */
  public boolean snapshotAcl(SnapshotDescription snapshot) {
    try {
      long start = EnvironmentEdgeManager.currentTime();
      TableName tableName = snapshot.getTableName();
      // global user permission can be inherited from default acl automatically
      Set<String> userSet = getUsersWithTableReadAction(tableName, true, false);
      if (userSet.size() > 0) {
        Path path = pathHelper.getSnapshotDir(snapshot.getName());
        handleHDFSAcl(new HDFSAclOperation(fs, path, userSet, HDFSAclOperation.OperationType.MODIFY,
          true, HDFSAclOperation.AclType.DEFAULT_AND_ACCESS)).get();
      }
      LOG.info("Set HDFS acl when snapshot {}, cost {} ms", snapshot.getName(),
        EnvironmentEdgeManager.currentTime() - start);
      return true;
    } catch (Exception e) {
      LOG.error("Set HDFS acl error when snapshot {}", snapshot, e);
      return false;
    }
  }

  /**
   * Remove the table access ACL from the namespace directory when deleting a table
   * @param tableName   the table
   * @param removeUsers the users whose access ACLs will be removed
   * @return false if an error occurred, otherwise true
   */
  public boolean removeNamespaceAccessAcl(TableName tableName, Set<String> removeUsers,
    String operation) {
    try {
      long start = EnvironmentEdgeManager.currentTime();
      if (removeUsers.size() > 0) {
        handleNamespaceAccessAcl(tableName.getNamespaceAsString(), removeUsers,
          HDFSAclOperation.OperationType.REMOVE);
      }
      LOG.info("Remove HDFS acl when {} table {}, cost {} ms", operation, tableName,
        EnvironmentEdgeManager.currentTime() - start);
      return true;
    } catch (Exception e) {
      LOG.error("Remove HDFS acl error when {} table {}", operation, tableName, e);
      return false;
    }
  }

  /**
   * Remove the default ACL from the namespace archive directory when deleting a namespace
   * @param namespace   the namespace
   * @param removeUsers the users whose default ACLs will be removed
   * @return false if an error occurred, otherwise true
   */
  public boolean removeNamespaceDefaultAcl(String namespace, Set<String> removeUsers) {
    try {
      long start = EnvironmentEdgeManager.currentTime();
      Path archiveNsDir = pathHelper.getArchiveNsDir(namespace);
      HDFSAclOperation operation = new HDFSAclOperation(fs, archiveNsDir, removeUsers,
        HDFSAclOperation.OperationType.REMOVE, false, HDFSAclOperation.AclType.DEFAULT);
      operation.handleAcl();
      LOG.info("Remove HDFS acl when delete namespace {}, cost {} ms", namespace,
        EnvironmentEdgeManager.currentTime() - start);
      return true;
    } catch (Exception e) {
      LOG.error("Remove HDFS acl error when delete namespace {}", namespace, e);
      return false;
    }
  }

  /**
   * Remove the default ACL from the table archive directory when deleting a table
   * @param tableName   the table name
   * @param removeUsers the users whose default ACLs will be removed
   * @return false if an error occurred, otherwise true
   */
  public boolean removeTableDefaultAcl(TableName tableName, Set<String> removeUsers) {
    try {
      long start = EnvironmentEdgeManager.currentTime();
      Path archiveTableDir = pathHelper.getArchiveTableDir(tableName);
      HDFSAclOperation operation = new HDFSAclOperation(fs, archiveTableDir, removeUsers,
        HDFSAclOperation.OperationType.REMOVE, false, HDFSAclOperation.AclType.DEFAULT);
      operation.handleAcl();
      LOG.info("Remove HDFS acl when delete table {}, cost {} ms", tableName,
        EnvironmentEdgeManager.currentTime() - start);
      return true;
    } catch (Exception e) {
      LOG.error("Remove HDFS acl error when delete table {}", tableName, e);
      return false;
    }
  }

  /**
   * Add table user ACLs
   * @param tableName the table
   * @param users     the table users with READ permission
   * @return false if an error occurred, otherwise true
   */
  public boolean addTableAcl(TableName tableName, Set<String> users, String operation) {
    try {
      long start = EnvironmentEdgeManager.currentTime();
      if (users.size() > 0) {
        HDFSAclOperation.OperationType operationType = HDFSAclOperation.OperationType.MODIFY;
        handleNamespaceAccessAcl(tableName.getNamespaceAsString(), users, operationType);
        handleTableAcl(Sets.newHashSet(tableName), users, new HashSet<>(0), new HashSet<>(0),
          operationType);
      }
      LOG.info("Set HDFS acl when {} table {}, cost {} ms", operation, tableName,
        EnvironmentEdgeManager.currentTime() - start);
      return true;
    } catch (Exception e) {
      LOG.error("Set HDFS acl error when {} table {}", operation, tableName, e);
      return false;
    }
  }

  /**
   * Remove table ACLs when modifying a table
   * @param tableName the table
   * @param users     the table users with READ permission
   * @return false if an error occurred, otherwise true
   */
  public boolean removeTableAcl(TableName tableName, Set<String> users) {
    try {
      long start = EnvironmentEdgeManager.currentTime();
      if (users.size() > 0) {
        handleTableAcl(Sets.newHashSet(tableName), users, new HashSet<>(0), new HashSet<>(0),
          HDFSAclOperation.OperationType.REMOVE);
      }
      LOG.info("Remove HDFS acl when modify table {}, cost {} ms", tableName,
        EnvironmentEdgeManager.currentTime() - start);
      return true;
    } catch (Exception e) {
      LOG.error("Remove HDFS acl error when modify table {}", tableName, e);
      return false;
    }
  }

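  /**
   * Dispatch a grant/revoke to the handler for the permission scope: global permissions touch the
   * global root paths plus every namespace and table, namespace permissions touch one namespace
   * and its tables, and table permissions touch the namespace access ACLs plus the table paths.
   */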
  private void handleGrantOrRevokeAcl(UserPermission userPermission,
    HDFSAclOperation.OperationType operationType, Set<String> skipNamespaces,
    Set<TableName> skipTables) throws ExecutionException, InterruptedException, IOException {
    Set<String> users = Sets.newHashSet(userPermission.getUser());
    switch (userPermission.getAccessScope()) {
      case GLOBAL:
        handleGlobalAcl(users, skipNamespaces, skipTables, operationType);
        break;
      case NAMESPACE:
        NamespacePermission namespacePermission =
          (NamespacePermission) userPermission.getPermission();
        handleNamespaceAcl(Sets.newHashSet(namespacePermission.getNamespace()), users,
          skipNamespaces, skipTables, operationType);
        break;
      case TABLE:
        TablePermission tablePermission = (TablePermission) userPermission.getPermission();
        handleNamespaceAccessAcl(tablePermission.getNamespace(), users, operationType);
        handleTableAcl(Sets.newHashSet(tablePermission.getTableName()), users, skipNamespaces,
          skipTables, operationType);
        break;
      default:
        throw new IllegalArgumentException(
          "Illegal user permission scope " + userPermission.getAccessScope());
    }
  }

  private void handleGlobalAcl(Set<String> users, Set<String> skipNamespaces,
    Set<TableName> skipTables, HDFSAclOperation.OperationType operationType)
    throws ExecutionException, InterruptedException, IOException {
    // handle global root directories HDFS acls
    List<HDFSAclOperation> hdfsAclOperations =
      getGlobalRootPaths().stream().map(path -> new HDFSAclOperation(fs, path, users, operationType,
        false, HDFSAclOperation.AclType.DEFAULT_AND_ACCESS)).collect(Collectors.toList());
    handleHDFSAclParallel(hdfsAclOperations).get();
    // handle namespace HDFS acls
    handleNamespaceAcl(Sets.newHashSet(admin.listNamespaces()), users, skipNamespaces, skipTables,
      operationType);
  }

  private void handleNamespaceAcl(Set<String> namespaces, Set<String> users,
    Set<String> skipNamespaces, Set<TableName> skipTables,
    HDFSAclOperation.OperationType operationType)
    throws ExecutionException, InterruptedException, IOException {
    namespaces.removeAll(skipNamespaces);
    namespaces.remove(NamespaceDescriptor.SYSTEM_NAMESPACE_NAME_STR);
    // handle namespace root directories HDFS acls
    List<HDFSAclOperation> hdfsAclOperations = new ArrayList<>();
    Set<String> skipTableNamespaces =
      skipTables.stream().map(TableName::getNamespaceAsString).collect(Collectors.toSet());
    for (String ns : namespaces) {
      /**
       * When op is REMOVE, remove the DEFAULT namespace ACL but keep the ACCESS ACL for
       * skipTables; otherwise remove both the DEFAULT and ACCESS ACLs. When op is MODIFY, operate
       * on both the DEFAULT and ACCESS ACLs.
       */
      HDFSAclOperation.OperationType op = operationType;
      HDFSAclOperation.AclType aclType = HDFSAclOperation.AclType.DEFAULT_AND_ACCESS;
      if (
        operationType == HDFSAclOperation.OperationType.REMOVE && skipTableNamespaces.contains(ns)
      ) {
        // remove namespace directories default HDFS acls for skip tables
        op = HDFSAclOperation.OperationType.REMOVE;
        aclType = HDFSAclOperation.AclType.DEFAULT;
      }
      for (Path path : getNamespaceRootPaths(ns)) {
        hdfsAclOperations.add(new HDFSAclOperation(fs, path, users, op, false, aclType));
      }
    }
    handleHDFSAclParallel(hdfsAclOperations).get();
    // handle table directories HDFS acls
    Set<TableName> tables = new HashSet<>();
    for (String namespace : namespaces) {
      tables.addAll(admin.listTableDescriptorsByNamespace(Bytes.toBytes(namespace)).stream()
        .filter(this::isAclSyncToHdfsEnabled).map(TableDescriptor::getTableName)
        .collect(Collectors.toSet()));
    }
    handleTableAcl(tables, users, skipNamespaces, skipTables, operationType);
  }

  private void handleTableAcl(Set<TableName> tableNames, Set<String> users,
    Set<String> skipNamespaces, Set<TableName> skipTables,
    HDFSAclOperation.OperationType operationType)
    throws ExecutionException, InterruptedException, IOException {
    Set<TableName> filterTableNames = new HashSet<>();
    for (TableName tableName : tableNames) {
      if (
        !skipTables.contains(tableName)
          && !skipNamespaces.contains(tableName.getNamespaceAsString())
      ) {
        filterTableNames.add(tableName);
      }
    }
    List<CompletableFuture<Void>> futures = new ArrayList<>();
    // handle table HDFS acls
    for (TableName tableName : filterTableNames) {
      List<HDFSAclOperation> hdfsAclOperations = getTableRootPaths(tableName, true).stream()
        .map(path -> new HDFSAclOperation(fs, path, users, operationType, true,
          HDFSAclOperation.AclType.DEFAULT_AND_ACCESS))
        .collect(Collectors.toList());
      CompletableFuture<Void> future = handleHDFSAclSequential(hdfsAclOperations);
      futures.add(future);
    }
    CompletableFuture<Void> future =
      CompletableFuture.allOf(futures.toArray(new CompletableFuture[futures.size()]));
    future.get();
  }

  private void handleNamespaceAccessAcl(String namespace, Set<String> users,
    HDFSAclOperation.OperationType operationType) throws ExecutionException, InterruptedException {
    // handle namespace access HDFS acls
    List<HDFSAclOperation> hdfsAclOperations =
      getNamespaceRootPaths(namespace).stream().map(path -> new HDFSAclOperation(fs, path, users,
        operationType, false, HDFSAclOperation.AclType.ACCESS)).collect(Collectors.toList());
    CompletableFuture<Void> future = handleHDFSAclParallel(hdfsAclOperations);
    future.get();
  }

  void createTableDirectories(TableName tableName) throws IOException {
    List<Path> paths = getTableRootPaths(tableName, false);
    for (Path path : paths) {
      createDirIfNotExist(path);
    }
  }

  /**
   * Return the paths that users with global permission will visit
   * @return the path list
   */
  List<Path> getGlobalRootPaths() {
    return Lists.newArrayList(pathHelper.getTmpDataDir(), pathHelper.getDataDir(),
      pathHelper.getMobDataDir(), pathHelper.getArchiveDataDir(), pathHelper.getSnapshotRootDir());
  }

  /**
   * Return the paths that users with namespace permission will visit
   * @param namespace the namespace
   * @return the path list
   */
  List<Path> getNamespaceRootPaths(String namespace) {
    return Lists.newArrayList(pathHelper.getTmpNsDir(namespace), pathHelper.getDataNsDir(namespace),
      pathHelper.getMobDataNsDir(namespace), pathHelper.getArchiveNsDir(namespace));
  }

  /**
   * Return the paths that users with table permission will visit
   * @param tableName           the table
   * @param includeSnapshotPath true if the table snapshot paths should be included, otherwise
   *                            false
   * @return the path list
   * @throws IOException if an error occurred
   */
  List<Path> getTableRootPaths(TableName tableName, boolean includeSnapshotPath)
    throws IOException {
    List<Path> paths = Lists.newArrayList(pathHelper.getDataTableDir(tableName),
      pathHelper.getMobTableDir(tableName), pathHelper.getArchiveTableDir(tableName));
    if (includeSnapshotPath) {
      paths.addAll(getTableSnapshotPaths(tableName));
    }
    return paths;
  }

  private List<Path> getTableSnapshotPaths(TableName tableName) throws IOException {
    return admin.listSnapshots().stream()
      .filter(snapDesc -> snapDesc.getTableName().equals(tableName))
      .map(snapshotDescription -> pathHelper.getSnapshotDir(snapshotDescription.getName()))
      .collect(Collectors.toList());
  }

  /**
   * Return users with global read permission
   * @return users with global read permission
   * @throws IOException if an error occurred
   */
  private Set<String> getUsersWithGlobalReadAction() throws IOException {
    return getUsersWithReadAction(PermissionStorage.getGlobalPermissions(conf));
  }

  /**
   * Return users with namespace read permission
   * @param namespace     the namespace
   * @param includeGlobal true to include users with global read action
   * @return users with namespace read permission
   * @throws IOException if an error occurred
   */
  Set<String> getUsersWithNamespaceReadAction(String namespace, boolean includeGlobal)
    throws IOException {
    Set<String> users =
      getUsersWithReadAction(PermissionStorage.getNamespacePermissions(conf, namespace));
    if (includeGlobal) {
      users.addAll(getUsersWithGlobalReadAction());
    }
    return users;
  }

  /**
   * Return users with table read permission
   * @param tableName        the table
   * @param includeNamespace true to include users with namespace read action
   * @param includeGlobal    true to include users with global read action
   * @return users with table read permission
   * @throws IOException if an error occurred
   */
  Set<String> getUsersWithTableReadAction(TableName tableName, boolean includeNamespace,
    boolean includeGlobal) throws IOException {
    Set<String> users =
      getUsersWithReadAction(PermissionStorage.getTablePermissions(conf, tableName));
    if (includeNamespace) {
      users
        .addAll(getUsersWithNamespaceReadAction(tableName.getNamespaceAsString(), includeGlobal));
    }
    return users;
  }

  private Set<String>
    getUsersWithReadAction(ListMultimap<String, UserPermission> permissionMultimap) {
    return permissionMultimap.entries().stream()
      .filter(entry -> checkUserPermission(entry.getValue())).map(Map.Entry::getKey)
      .collect(Collectors.toSet());
  }

  private boolean checkUserPermission(UserPermission userPermission) {
    boolean result = containReadAction(userPermission);
    if (result && userPermission.getPermission() instanceof TablePermission) {
      result = isNotFamilyOrQualifierPermission((TablePermission) userPermission.getPermission());
    }
    return result;
  }

  boolean containReadAction(UserPermission userPermission) {
    return userPermission.getPermission().implies(Permission.Action.READ);
  }

  boolean isNotFamilyOrQualifierPermission(TablePermission tablePermission) {
    return !tablePermission.hasFamily() && !tablePermission.hasQualifier();
  }

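  /**
   * Check whether the HDFS ACL sync feature is enabled: the feature flag must be set and both
   * {@link SnapshotScannerHDFSAclController} and {@link AccessController} must be configured as
   * master coprocessors.
   */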
  public static boolean isAclSyncToHdfsEnabled(Configuration conf) {
    String[] masterCoprocessors = conf.getStrings(CoprocessorHost.MASTER_COPROCESSOR_CONF_KEY);
    Set<String> masterCoprocessorSet = new HashSet<>();
    if (masterCoprocessors != null) {
      Collections.addAll(masterCoprocessorSet, masterCoprocessors);
    }
    return conf.getBoolean(SnapshotScannerHDFSAclHelper.ACL_SYNC_TO_HDFS_ENABLE, false)
      && masterCoprocessorSet.contains(SnapshotScannerHDFSAclController.class.getName())
      && masterCoprocessorSet.contains(AccessController.class.getName());
  }

  boolean isAclSyncToHdfsEnabled(TableDescriptor tableDescriptor) {
    return tableDescriptor == null
      ? false
      : Boolean.valueOf(tableDescriptor.getValue(ACL_SYNC_TO_HDFS_ENABLE));
  }

  PathHelper getPathHelper() {
    return pathHelper;
  }

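  /**
   * Asynchronously apply the ACL operation to its path, then handle the child operations (only
   * present for recursive operations on directories) in parallel. A path that no longer exists is
   * silently skipped.
   */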
  private CompletableFuture<Void> handleHDFSAcl(HDFSAclOperation acl) {
    return CompletableFuture.supplyAsync(() -> {
      List<HDFSAclOperation> childAclOperations = new ArrayList<>();
      try {
        acl.handleAcl();
        childAclOperations = acl.getChildAclOperations();
      } catch (FileNotFoundException e) {
        // Skip handle acl if file not found
      } catch (IOException e) {
        LOG.error("Set HDFS acl error for path {}", acl.path, e);
      }
      return childAclOperations;
    }, pool).thenComposeAsync(this::handleHDFSAclParallel, pool);
  }

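  /**
   * Handle the given operations one after another on the worker pool, waiting for each operation
   * (including its recursive children) to finish before starting the next one.
   */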
  private CompletableFuture<Void> handleHDFSAclSequential(List<HDFSAclOperation> operations) {
    return CompletableFuture.supplyAsync(() -> {
      try {
        for (HDFSAclOperation hdfsAclOperation : operations) {
          handleHDFSAcl(hdfsAclOperation).get();
        }
      } catch (InterruptedException | ExecutionException e) {
        LOG.error("Set HDFS acl error", e);
      }
      return null;
    }, pool);
  }

  private CompletableFuture<Void> handleHDFSAclParallel(List<HDFSAclOperation> operations) {
    List<CompletableFuture<Void>> futures =
      operations.stream().map(this::handleHDFSAcl).collect(Collectors.toList());
    return CompletableFuture.allOf(futures.toArray(new CompletableFuture[futures.size()]));
  }

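  /**
   * Build a READ_EXECUTE ACL entry for the given scope; names recognized by
   * {@link AuthUtil#isGroupPrincipal(String)} become GROUP entries, all other names USER entries.
   */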
  private static AclEntry aclEntry(AclEntryScope scope, String name) {
    return new AclEntry.Builder().setScope(scope)
      .setType(AuthUtil.isGroupPrincipal(name) ? GROUP : USER).setName(name)
      .setPermission(READ_EXECUTE).build();
  }

  void createDirIfNotExist(Path path) throws IOException {
    if (!fs.exists(path)) {
      fs.mkdirs(path);
    }
  }

  void deleteEmptyDir(Path path) throws IOException {
    if (fs.exists(path) && fs.listStatus(path).length == 0) {
      fs.delete(path, false);
    }
  }

  /**
   * Inner class describing an operation that modifies or removes ACL entries of a given type
   * (ACCESS, DEFAULT, or DEFAULT_AND_ACCESS) for a file or directory (and its child files).
   */
  private static class HDFSAclOperation {
    enum OperationType {
      MODIFY,
      REMOVE
    }

    enum AclType {
      ACCESS,
      DEFAULT,
      DEFAULT_AND_ACCESS
    }

    private interface Operation {
      void apply(FileSystem fs, Path path, List<AclEntry> aclList) throws IOException;
    }

    private FileSystem fs;
    private Path path;
    private Operation operation;
    private boolean recursive;
    private AclType aclType;
    private List<AclEntry> defaultAndAccessAclEntries;
    private List<AclEntry> accessAclEntries;
    private List<AclEntry> defaultAclEntries;

    HDFSAclOperation(FileSystem fs, Path path, Set<String> users, OperationType operationType,
      boolean recursive, AclType aclType) {
      this.fs = fs;
      this.path = path;
      this.defaultAndAccessAclEntries = getAclEntries(AclType.DEFAULT_AND_ACCESS, users);
      this.accessAclEntries = getAclEntries(AclType.ACCESS, users);
      this.defaultAclEntries = getAclEntries(AclType.DEFAULT, users);
      if (operationType == OperationType.MODIFY) {
        operation = FileSystem::modifyAclEntries;
      } else if (operationType == OperationType.REMOVE) {
        operation = FileSystem::removeAclEntries;
      } else {
        throw new IllegalArgumentException("Illegal HDFS acl operation type: " + operationType);
      }
      this.recursive = recursive;
      this.aclType = aclType;
    }

    HDFSAclOperation(Path path, HDFSAclOperation parent) {
      this.fs = parent.fs;
      this.path = path;
      this.defaultAndAccessAclEntries = parent.defaultAndAccessAclEntries;
      this.accessAclEntries = parent.accessAclEntries;
      this.defaultAclEntries = parent.defaultAclEntries;
      this.operation = parent.operation;
      this.recursive = parent.recursive;
      this.aclType = parent.aclType;
    }

    List<HDFSAclOperation> getChildAclOperations() throws IOException {
      List<HDFSAclOperation> hdfsAclOperations = new ArrayList<>();
      if (recursive && fs.isDirectory(path)) {
        FileStatus[] fileStatuses = fs.listStatus(path);
        for (FileStatus fileStatus : fileStatuses) {
          hdfsAclOperations.add(new HDFSAclOperation(fileStatus.getPath(), this));
        }
      }
      return hdfsAclOperations;
    }

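    // Apply the prepared ACL entries to the path if it exists: directories honor the requested
    // AclType, while plain files only receive access entries because default ACLs apply to
    // directories only.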
    void handleAcl() throws IOException {
      if (fs.exists(path)) {
        if (fs.isDirectory(path)) {
          switch (aclType) {
            case ACCESS:
              operation.apply(fs, path, accessAclEntries);
              break;
            case DEFAULT:
              operation.apply(fs, path, defaultAclEntries);
              break;
            case DEFAULT_AND_ACCESS:
              operation.apply(fs, path, defaultAndAccessAclEntries);
              break;
            default:
              throw new IllegalArgumentException("Illegal HDFS acl type: " + aclType);
          }
        } else {
          operation.apply(fs, path, accessAclEntries);
        }
      }
    }

    private List<AclEntry> getAclEntries(AclType aclType, Set<String> users) {
      List<AclEntry> aclEntries = new ArrayList<>();
      switch (aclType) {
        case ACCESS:
          for (String user : users) {
            aclEntries.add(aclEntry(ACCESS, user));
          }
          break;
        case DEFAULT:
          for (String user : users) {
            aclEntries.add(aclEntry(DEFAULT, user));
          }
          break;
        case DEFAULT_AND_ACCESS:
          for (String user : users) {
            aclEntries.add(aclEntry(ACCESS, user));
            aclEntries.add(aclEntry(DEFAULT, user));
          }
          break;
        default:
          throw new IllegalArgumentException("Illegal HDFS acl type: " + aclType);
      }
      return aclEntries;
    }
  }

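  /**
   * Resolves the directory layout (data, mob, archive, tmp, and snapshot directories plus their
   * namespace and table sub-directories) under the configured HBase root directory.
   */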
  static final class PathHelper {
    Configuration conf;
    Path rootDir;
    Path tmpDataDir;
    Path dataDir;
    Path mobDataDir;
    Path archiveDataDir;
    Path snapshotDir;

    PathHelper(Configuration conf) {
      this.conf = conf;
      rootDir = new Path(conf.get(HConstants.HBASE_DIR));
      tmpDataDir =
        new Path(new Path(rootDir, HConstants.HBASE_TEMP_DIRECTORY), HConstants.BASE_NAMESPACE_DIR);
      dataDir = new Path(rootDir, HConstants.BASE_NAMESPACE_DIR);
      mobDataDir = new Path(MobUtils.getMobHome(rootDir), HConstants.BASE_NAMESPACE_DIR);
      archiveDataDir = new Path(new Path(rootDir, HConstants.HFILE_ARCHIVE_DIRECTORY),
        HConstants.BASE_NAMESPACE_DIR);
      snapshotDir = new Path(rootDir, HConstants.SNAPSHOT_DIR_NAME);
    }

    Path getRootDir() {
      return rootDir;
    }

    Path getDataDir() {
      return dataDir;
    }

    Path getMobDir() {
      return mobDataDir.getParent();
    }

    Path getMobDataDir() {
      return mobDataDir;
    }

    Path getTmpDir() {
      return new Path(rootDir, HConstants.HBASE_TEMP_DIRECTORY);
    }

    Path getTmpDataDir() {
      return tmpDataDir;
    }

    Path getArchiveDir() {
      return new Path(rootDir, HConstants.HFILE_ARCHIVE_DIRECTORY);
    }

    Path getArchiveDataDir() {
      return archiveDataDir;
    }

    Path getDataNsDir(String namespace) {
      return new Path(dataDir, namespace);
    }

    Path getMobDataNsDir(String namespace) {
      return new Path(mobDataDir, namespace);
    }

    Path getDataTableDir(TableName tableName) {
      return new Path(getDataNsDir(tableName.getNamespaceAsString()),
        tableName.getQualifierAsString());
    }

    Path getMobTableDir(TableName tableName) {
      return new Path(getMobDataNsDir(tableName.getNamespaceAsString()),
        tableName.getQualifierAsString());
    }

    Path getArchiveNsDir(String namespace) {
      return new Path(archiveDataDir, namespace);
    }

    Path getArchiveTableDir(TableName tableName) {
      return new Path(getArchiveNsDir(tableName.getNamespaceAsString()),
        tableName.getQualifierAsString());
    }

    Path getTmpNsDir(String namespace) {
      return new Path(tmpDataDir, namespace);
    }

    Path getTmpTableDir(TableName tableName) {
      return new Path(getTmpNsDir(tableName.getNamespaceAsString()),
        tableName.getQualifierAsString());
    }

    Path getSnapshotRootDir() {
      return snapshotDir;
    }

    Path getSnapshotDir(String snapshot) {
      return new Path(snapshotDir, snapshot);
    }

    FileSystem getFileSystem() throws IOException {
      return rootDir.getFileSystem(conf);
    }
  }
}