/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.security;

import java.io.IOException;
import java.util.LinkedHashSet;
import java.util.Set;
import java.util.concurrent.Callable;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CommonConfigurationKeys;
import org.apache.hadoop.hbase.BaseConfigurable;
import org.apache.hadoop.security.Groups;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.util.ReflectionUtils;
import org.apache.yetus.audience.InterfaceAudience;

import org.apache.hbase.thirdparty.com.google.common.cache.CacheBuilder;
import org.apache.hbase.thirdparty.com.google.common.cache.CacheLoader;
import org.apache.hbase.thirdparty.com.google.common.cache.LoadingCache;
import org.apache.hbase.thirdparty.com.google.common.util.concurrent.ListenableFuture;
import org.apache.hbase.thirdparty.com.google.common.util.concurrent.ListeningExecutorService;
import org.apache.hbase.thirdparty.com.google.common.util.concurrent.MoreExecutors;
import org.apache.hbase.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder;

/**
 * Provide an instance of a user. Allows custom {@link User} creation.
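 * <p>
 * A minimal usage sketch (assuming an existing {@code Configuration conf}; exception handling
 * elided):
 * </p>
 *
 * <pre>
 * UserProvider provider = UserProvider.instantiate(conf);
 * User user = provider.getCurrent();
 * </pre>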
 */
@InterfaceAudience.Private
public class UserProvider extends BaseConfigurable {

  private static final String USER_PROVIDER_CONF_KEY = "hbase.client.userprovider.class";
  private static final ListeningExecutorService executor =
    MoreExecutors.listeningDecorator(Executors.newScheduledThreadPool(1,
      new ThreadFactoryBuilder().setDaemon(true).setNameFormat("group-cache-%d").build()));

  private LoadingCache<String, String[]> groupCache = null;

  static Groups groups = Groups.getUserToGroupsMappingService();

  public static Groups getGroups() {
    return groups;
  }

  public static void setGroups(Groups groups) {
    UserProvider.groups = groups;
  }

  @Override
  public void setConf(final Configuration conf) {
    super.setConf(conf);

    synchronized (UserProvider.class) {
      if (!(groups instanceof User.TestingGroups)) {
        groups = Groups.getUserToGroupsMappingService(conf);
      }
    }

    long cacheTimeout = getConf().getLong(CommonConfigurationKeys.HADOOP_SECURITY_GROUPS_CACHE_SECS,
      CommonConfigurationKeys.HADOOP_SECURITY_GROUPS_CACHE_SECS_DEFAULT) * 1000;

    this.groupCache = CacheBuilder.newBuilder()
      // This is the same timeout that hadoop uses. So we'll follow suit.
      .refreshAfterWrite(cacheTimeout, TimeUnit.MILLISECONDS)
      .expireAfterWrite(10 * cacheTimeout, TimeUnit.MILLISECONDS)
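      // With these settings, a read of a stale entry triggers an async reload (see reload()
      // below), while entries not rewritten within ten timeouts are evicted outright.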
      // Set concurrency level equal to the default number of handlers that
      // the simple handler spins up.
      .concurrencyLevel(20)
      // create the loader
      // This just delegates to UGI.
      .build(new CacheLoader<String, String[]>() {

        // Since UGIs don't hash based on the user id, the cache needs to be keyed
        // on the same thing that Hadoop's Groups class uses: the short name.
        @Override
        public String[] load(String ugi) throws Exception {
          return getGroupStrings(ugi);
        }

        private String[] getGroupStrings(String ugi) {
          try {
            Set<String> result = new LinkedHashSet<>(groups.getGroups(ugi));
            return result.toArray(new String[result.size()]);
          } catch (Exception e) {
            return new String[0];
          }
        }

        // Provide the reload function that uses the executor thread.
        @Override
        public ListenableFuture<String[]> reload(final String k, String[] oldValue)
          throws Exception {

          return executor.submit(new Callable<String[]>() {
            @Override
            public String[] call() throws Exception {
              return getGroupStrings(k);
            }
          });
        }
      });
  }

  /**
   * Instantiate the {@link UserProvider} specified in the configuration and set the passed
   * configuration via {@link UserProvider#setConf(Configuration)}.
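   * <p>
   * For example, to plug in a custom provider (the subclass name here is illustrative):
   * </p>
   *
   * <pre>
   * conf.set("hbase.client.userprovider.class", MyUserProvider.class.getName());
   * UserProvider provider = UserProvider.instantiate(conf);
   * </pre>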
   * @param conf to read and set on the created {@link UserProvider}
   * @return a {@link UserProvider} ready for use.
   */
  public static UserProvider instantiate(Configuration conf) {
    Class<? extends UserProvider> clazz =
      conf.getClass(USER_PROVIDER_CONF_KEY, UserProvider.class, UserProvider.class);
    return ReflectionUtils.newInstance(clazz, conf);
  }

  /**
   * Set, in the given configuration, the {@link UserProvider} class that
   * {@link #instantiate(Configuration)} should create.
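   * <p>
   * Typical test wiring (the subclass name here is illustrative):
   * </p>
   *
   * <pre>
   * UserProvider.setUserProviderForTesting(conf, MyTestUserProvider.class);
   * UserProvider provider = UserProvider.instantiate(conf);
   * </pre>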
   * @param conf     to update
   * @param provider class of the provider to set
   */
  public static void setUserProviderForTesting(Configuration conf,
    Class<? extends UserProvider> provider) {
    conf.set(USER_PROVIDER_CONF_KEY, provider.getName());
  }

  /**
   * Returns the userName for the current logged-in user.
   * @throws IOException if the underlying user cannot be obtained
   */
  public String getCurrentUserName() throws IOException {
    User user = getCurrent();
    return user == null ? null : user.getName();
  }

  /** Returns {@code true} if security is enabled, {@code false} otherwise */
  public boolean isHBaseSecurityEnabled() {
    return User.isHBaseSecurityEnabled(this.getConf());
  }

  /**
   * Return whether or not Kerberos authentication is configured for Hadoop. For non-secure Hadoop,
   * this always returns <code>false</code>. For secure Hadoop, it will return the value from
   * {@code UserGroupInformation.isSecurityEnabled()}.
   */
  public boolean isHadoopSecurityEnabled() {
    return User.isSecurityEnabled();
  }

  /**
   * In a secure environment, if the user has specified a keytab and principal, the HBase client
   * will try to log in with them; otherwise it will try to obtain a ticket from the system (e.g.
   * through kinit).
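   * <p>
   * A typical caller might branch on this; a sketch, where {@code provider} is an instantiated
   * {@code UserProvider} and the configuration key names are illustrative:
   * </p>
   *
   * <pre>
   * if (provider.shouldLoginFromKeytab()) {
   *   provider.login("client.keytab.file", "client.kerberos.principal");
   * }
   * </pre>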
   */
  public boolean shouldLoginFromKeytab() {
    return User.shouldLoginFromKeytab(this.getConf());
  }

  /**
   * Return the current user within the current execution context.
   * @throws IOException if the user cannot be loaded
   */
  public User getCurrent() throws IOException {
    return User.getCurrent();
  }

  /**
   * Wraps an underlying {@code UserGroupInformation} instance.
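   * <p>
   * A sketch (assumes the current Hadoop user is wanted; exception handling elided):
   * </p>
   *
   * <pre>
   * User user = provider.create(UserGroupInformation.getCurrentUser());
   * </pre>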
   * @param ugi The base Hadoop user
   */
  public User create(UserGroupInformation ugi) {
    if (ugi == null) {
      return null;
    }
    return new User.SecureHadoopUser(ugi, groupCache);
  }

  /**
   * Log in the current process using the given configuration keys for the credential file and
   * login principal. This is for SPN (Service Principal Name) login; the SPN should have the form
   * {@code servicename/fully.qualified.domain.name@REALM}.
   * <p>
   * <strong>This is only applicable when running on secure Hadoop</strong> -- see
   * org.apache.hadoop.security.SecurityUtil#login(Configuration,String,String,String). On regular
   * Hadoop (without security features), this will safely be ignored.
   * </p>
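   * <p>
   * For example (the configuration key names below are illustrative, not fixed HBase keys):
   * </p>
   *
   * <pre>
   * provider.login("myservice.keytab.file", "myservice.kerberos.principal", hostname);
   * </pre>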
   * @param fileConfKey      Property key used to configure path to the credential file
   * @param principalConfKey Property key used to configure login principal
   * @param localhost        Current hostname to use in any credentials
   * @throws IOException underlying exception from SecurityUtil.login() call
   */
  public void login(String fileConfKey, String principalConfKey, String localhost)
    throws IOException {
    User.login(getConf(), fileConfKey, principalConfKey, localhost);
  }

  /**
   * Log in with the given keytab and principal. This can be used for both an SPN (Service
   * Principal Name) and a UPN (User Principal Name); a UPN should have the form
   * {@code clientname@REALM}.
   * @param fileConfKey      config name for the client keytab
   * @param principalConfKey config name for the client principal
   * @throws IOException underlying exception from UserGroupInformation.loginUserFromKeytab
   */
  public void login(String fileConfKey, String principalConfKey) throws IOException {
    User.login(getConf().get(fileConfKey), getConf().get(principalConfKey));
  }
}