/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.security;

import java.io.IOException;
import java.util.LinkedHashSet;
import java.util.Set;
import java.util.concurrent.Callable;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CommonConfigurationKeys;
import org.apache.hadoop.hbase.BaseConfigurable;
import org.apache.hadoop.security.Groups;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.util.ReflectionUtils;
import org.apache.yetus.audience.InterfaceAudience;

import org.apache.hbase.thirdparty.com.google.common.cache.CacheBuilder;
import org.apache.hbase.thirdparty.com.google.common.cache.CacheLoader;
import org.apache.hbase.thirdparty.com.google.common.cache.LoadingCache;
import org.apache.hbase.thirdparty.com.google.common.util.concurrent.ListenableFuture;
import org.apache.hbase.thirdparty.com.google.common.util.concurrent.ListeningExecutorService;
import org.apache.hbase.thirdparty.com.google.common.util.concurrent.MoreExecutors;
import org.apache.hbase.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder;

/**
 * Provide an instance of a user. Allows custom {@link User} creation.
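 * <p>Example usage (an illustrative sketch; assumes {@code conf} is an existing
 * {@code Configuration}):
 * <pre>
 * UserProvider provider = UserProvider.instantiate(conf);
 * User user = provider.getCurrent();
 * String name = provider.getCurrentUserName();
 * </pre>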
 */
@InterfaceAudience.Private
public class UserProvider extends BaseConfigurable {

  private static final String USER_PROVIDER_CONF_KEY = "hbase.client.userprovider.class";

  // Single daemon thread that asynchronously refreshes entries in the group cache.
  private static final ListeningExecutorService executor = MoreExecutors.listeningDecorator(
      Executors.newScheduledThreadPool(
          1,
          new ThreadFactoryBuilder().setDaemon(true).setNameFormat("group-cache-%d").build()));

  // Caches each user's group names, keyed by the user's short name.
  private LoadingCache<String, String[]> groupCache = null;

  static Groups groups = Groups.getUserToGroupsMappingService();

  public static Groups getGroups() {
    return groups;
  }

  public static void setGroups(Groups groups) {
    UserProvider.groups = groups;
  }

  @Override
  public void setConf(final Configuration conf) {
    super.setConf(conf);

    synchronized (UserProvider.class) {
      if (!(groups instanceof User.TestingGroups)) {
        groups = Groups.getUserToGroupsMappingService(conf);
      }
    }

    long cacheTimeout =
        getConf().getLong(CommonConfigurationKeys.HADOOP_SECURITY_GROUPS_CACHE_SECS,
            CommonConfigurationKeys.HADOOP_SECURITY_GROUPS_CACHE_SECS_DEFAULT) * 1000;

    this.groupCache = CacheBuilder.newBuilder()
        // Refresh on the same timeout that Hadoop uses, so we follow suit.
        .refreshAfterWrite(cacheTimeout, TimeUnit.MILLISECONDS)
        .expireAfterWrite(10 * cacheTimeout, TimeUnit.MILLISECONDS)
        // Set the concurrency level equal to the default number of handlers that
        // the simple handler spins up.
        .concurrencyLevel(20)
        // Create the loader; it just delegates to the Groups mapping service.
        .build(new CacheLoader<String, String[]>() {

          // UGIs don't hash based on the user id, so the cache needs to be keyed on the
          // same thing that Hadoop's Groups class uses: the user's short name.
          @Override
          public String[] load(String ugi) throws Exception {
            return getGroupStrings(ugi);
          }

          private String[] getGroupStrings(String ugi) {
            try {
              Set<String> result = new LinkedHashSet<>(groups.getGroups(ugi));
              return result.toArray(new String[result.size()]);
            } catch (Exception e) {
              // If the group lookup fails, treat the user as belonging to no groups.
              return new String[0];
            }
          }

          // Provide the reload function that runs on the executor thread.
          @Override
          public ListenableFuture<String[]> reload(final String k, String[] oldValue)
              throws Exception {
            return executor.submit(new Callable<String[]>() {
              @Override
              public String[] call() throws Exception {
                return getGroupStrings(k);
              }
            });
          }
        });
  }

  /**
   * Instantiate the {@link UserProvider} specified in the configuration and set the passed
   * configuration via {@link UserProvider#setConf(Configuration)}.
   * @param conf to read and set on the created {@link UserProvider}
   * @return a {@link UserProvider} ready for use.
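   * <p>Example (illustrative; {@code HBaseConfiguration.create()} is one common way to obtain a
   * configuration):
   * <pre>
   * Configuration conf = HBaseConfiguration.create();
   * UserProvider provider = UserProvider.instantiate(conf);
   * </pre>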
   */
  public static UserProvider instantiate(Configuration conf) {
    Class<? extends UserProvider> clazz =
        conf.getClass(USER_PROVIDER_CONF_KEY, UserProvider.class, UserProvider.class);
    return ReflectionUtils.newInstance(clazz, conf);
  }

  /**
   * Set, in the given configuration, the {@link UserProvider} implementation that
   * {@link #instantiate(Configuration)} should create.
   * @param conf configuration to update
   * @param provider class of the provider to set
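   * <p>Example from a test ({@code MyTestUserProvider} is a hypothetical subclass):
   * <pre>
   * UserProvider.setUserProviderForTesting(conf, MyTestUserProvider.class);
   * UserProvider provider = UserProvider.instantiate(conf); // now a MyTestUserProvider
   * </pre>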
   */
  public static void setUserProviderForTesting(Configuration conf,
      Class<? extends UserProvider> provider) {
    conf.set(USER_PROVIDER_CONF_KEY, provider.getName());
  }

  /**
   * @return the user name of the currently logged-in user
   * @throws IOException if the underlying user cannot be obtained
   */
  public String getCurrentUserName() throws IOException {
    User user = getCurrent();
    return user == null ? null : user.getName();
  }

  /**
   * @return <tt>true</tt> if security is enabled, <tt>false</tt> otherwise
   */
  public boolean isHBaseSecurityEnabled() {
    return User.isHBaseSecurityEnabled(this.getConf());
  }

  /**
   * @return whether or not Kerberos authentication is configured for Hadoop. For non-secure Hadoop,
   *         this always returns <code>false</code>. For secure Hadoop, it will return the value
   *         from {@code UserGroupInformation.isSecurityEnabled()}.
   */
  public boolean isHadoopSecurityEnabled() {
    return User.isSecurityEnabled();
  }

  /**
   * In a secure environment, if the user has specified a keytab and principal, the HBase client
   * will try to log in with them. Otherwise, the client will try to obtain a ticket (through
   * kinit) from the system.
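   * <p>Illustrative configuration (the key names below are examples, not constants defined in
   * this class):
   * <pre>
   * conf.set("hbase.client.keytab.file", "/etc/security/keytabs/hbase.keytab");
   * conf.set("hbase.client.keytab.principal", "hbase-client@EXAMPLE.COM");
   * </pre>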
   */
  public boolean shouldLoginFromKeytab() {
    return User.shouldLoginFromKeytab(this.getConf());
  }

  /**
   * @return the current user within the current execution context
   * @throws IOException if the user cannot be loaded
   */
  public User getCurrent() throws IOException {
    return User.getCurrent();
  }

  /**
   * Wraps an underlying {@code UserGroupInformation} instance.
   * @param ugi The base Hadoop user
   * @return a {@link User} wrapping the given {@code ugi}, or {@code null} if {@code ugi} is null
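   * <p>Example (illustrative; {@code UserGroupInformation.getCurrentUser()} is the standard
   * Hadoop way to obtain the current UGI):
   * <pre>
   * User user = provider.create(UserGroupInformation.getCurrentUser());
   * </pre>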
   */
  public User create(UserGroupInformation ugi) {
    if (ugi == null) {
      return null;
    }
    return new User.SecureHadoopUser(ugi, groupCache);
  }

  /**
   * Log in the current process using the given configuration keys for the credential file and
   * login principal. This is for SPN (Service Principal Name) login; an SPN has the form
   * {@code servicename/fully.qualified.domain.name@REALM}.
   * <p>
   * <strong>This is only applicable when running on secure Hadoop</strong> -- see
   * org.apache.hadoop.security.SecurityUtil#login(Configuration,String,String,String). On regular
   * Hadoop (without security features), this will safely be ignored.
   * </p>
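   * <p>Example (the key names shown are illustrative):
   * <pre>
   * provider.login("myservice.keytab.file", "myservice.kerberos.principal", "host1.example.com");
   * </pre>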
   * @param fileConfKey Property key used to configure path to the credential file
   * @param principalConfKey Property key used to configure login principal
   * @param localhost Current hostname to use in any credentials
   * @throws IOException underlying exception from SecurityUtil.login() call
   */
  public void login(String fileConfKey, String principalConfKey, String localhost)
      throws IOException {
    User.login(getConf(), fileConfKey, principalConfKey, localhost);
  }

  /**
   * Log in with the given keytab and principal. This can be used for both an SPN (Service
   * Principal Name) and a UPN (User Principal Name); a UPN has the form {@code clientname@REALM}.
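   * <p>Example (the key names shown are illustrative):
   * <pre>
   * provider.login("myclient.keytab.file", "myclient.kerberos.principal");
   * </pre>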
   * @param fileConfKey config name for client keytab
   * @param principalConfKey config name for client principal
   * @throws IOException underlying exception from UserGroupInformation.loginUserFromKeytab
   */
  public void login(String fileConfKey, String principalConfKey) throws IOException {
    User.login(getConf().get(fileConfKey), getConf().get(principalConfKey));
  }
}