/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.hadoop.hbase.security.access;

import com.google.protobuf.RpcCallback;
import com.google.protobuf.RpcController;
import com.google.protobuf.Service;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.hbase.Coprocessor;
import org.apache.hadoop.hbase.CoprocessorEnvironment;
import org.apache.hadoop.hbase.DoNotRetryIOException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.coprocessor.CoprocessorService;
import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
import org.apache.hadoop.hbase.ipc.RequestContext;
import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
import org.apache.hadoop.hbase.protobuf.ResponseConverter;
import org.apache.hadoop.hbase.protobuf.generated.ClientProtos;
import org.apache.hadoop.hbase.protobuf.generated.SecureBulkLoadProtos.CleanupBulkLoadRequest;
import org.apache.hadoop.hbase.protobuf.generated.SecureBulkLoadProtos.CleanupBulkLoadResponse;
import org.apache.hadoop.hbase.protobuf.generated.SecureBulkLoadProtos.PrepareBulkLoadRequest;
import org.apache.hadoop.hbase.protobuf.generated.SecureBulkLoadProtos.PrepareBulkLoadResponse;
import org.apache.hadoop.hbase.protobuf.generated.SecureBulkLoadProtos.SecureBulkLoadHFilesRequest;
import org.apache.hadoop.hbase.protobuf.generated.SecureBulkLoadProtos.SecureBulkLoadHFilesResponse;
import org.apache.hadoop.hbase.protobuf.generated.SecureBulkLoadProtos.SecureBulkLoadService;
import org.apache.hadoop.hbase.regionserver.HRegion;
import org.apache.hadoop.hbase.security.SecureBulkLoadUtil;
import org.apache.hadoop.hbase.security.User;
import org.apache.hadoop.hbase.security.UserProvider;
import org.apache.hadoop.hbase.security.token.FsDelegationToken;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.FSHDFSUtils;
import org.apache.hadoop.hbase.util.Methods;
import org.apache.hadoop.hbase.util.Pair;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.token.Token;

import java.io.IOException;
import java.math.BigInteger;
import java.security.PrivilegedAction;
import java.security.SecureRandom;
import java.util.ArrayList;
import java.util.List;

/**
 * Coprocessor service for bulk loads in secure mode.
 * This coprocessor has to be installed as part of enabling
 * security in HBase.
 *
 * This service addresses two issues:
 *
 * 1. Moving files in a secure filesystem where the HBase client
 * and HBase server are different filesystem users.
 * 2. Doing the move in a secure manner, assuming the filesystem
 * is POSIX compliant.
 *
 * The algorithm is as follows:
 *
 * 1. Create an HBase-owned staging directory which is
 * world traversable (711): /hbase/staging
 * 2. A user writes out data to their secure output directory: /user/foo/data
 * 3. A call is made to HBase to create a secret staging directory
 * which is globally rwx (777): /hbase/staging/averylongandrandomdirectoryname
 * 4. The user moves the data into the random staging directory,
 * then calls bulkLoadHFiles()
 *
 * As with delegation tokens, the strength of the security lies in the length
 * and randomness of the secret directory.
 *
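 * A rough client-side sketch of steps 3-4 (illustrative only; the method names
 * are assumed from the contemporary SecureBulkLoadClient wrapper for this
 * service and may differ across versions):
 *
 * <pre>
 * SecureBulkLoadClient client = new SecureBulkLoadClient(table);
 * String bulkToken = client.prepareBulkLoad(table.getName()); // secret staging dir
 * // ...move the prepared HFiles under bulkToken, then:
 * boolean loaded = client.bulkLoadHFiles(familyPaths, fsToken, bulkToken, startRow);
 * client.cleanupBulkLoad(bulkToken);
 * </pre>
 *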
 */
@InterfaceAudience.Private
public class SecureBulkLoadEndpoint extends SecureBulkLoadService
    implements CoprocessorService, Coprocessor {

  public static final long VERSION = 0L;

  // 320 bits of randomness rendered in radix 32: 320/5 = 64 characters
  private static final int RANDOM_WIDTH = 320;
  private static final int RANDOM_RADIX = 32;

  private static Log LOG = LogFactory.getLog(SecureBulkLoadEndpoint.class);

  private final static FsPermission PERM_ALL_ACCESS = FsPermission.valueOf("-rwxrwxrwx");
  private final static FsPermission PERM_HIDDEN = FsPermission.valueOf("-rwx--x--x");

  private SecureRandom random;
  private FileSystem fs;
  private Configuration conf;

  // Kept two levels deep so it doesn't get deleted accidentally;
  // Hadoop 1.0 has no sticky bit to protect it.
  private Path baseStagingDir;

  private RegionCoprocessorEnvironment env;

  private UserProvider userProvider;

  @Override
  public void start(CoprocessorEnvironment env) {
    this.env = (RegionCoprocessorEnvironment)env;
    random = new SecureRandom();
    conf = env.getConfiguration();
    baseStagingDir = SecureBulkLoadUtil.getBaseStagingDir(conf);
    this.userProvider = UserProvider.instantiate(conf);

    try {
      fs = FileSystem.get(conf);
      fs.mkdirs(baseStagingDir, PERM_HIDDEN);
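      // On HDFS, mkdirs applies the client's umask to the requested permission,
      // so re-apply the exact mode explicitly.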
      fs.setPermission(baseStagingDir, PERM_HIDDEN);
      // no sticky bit in hadoop-1.0, so keep the directory non-empty so it never gets erased
      fs.mkdirs(new Path(baseStagingDir, "DONOTERASE"), PERM_HIDDEN);
      FileStatus status = fs.getFileStatus(baseStagingDir);
      if (status == null) {
        throw new IllegalStateException("Failed to create staging directory");
      }
      if (!status.getPermission().equals(PERM_HIDDEN)) {
        throw new IllegalStateException(
            "Directory already exists but permissions aren't set to '-rwx--x--x'");
      }
    } catch (IOException e) {
      throw new IllegalStateException("Failed to get FileSystem instance", e);
    }
  }

  @Override
  public void stop(CoprocessorEnvironment env) throws IOException {
  }

  @Override
  public void prepareBulkLoad(RpcController controller,
                              PrepareBulkLoadRequest request,
                              RpcCallback<PrepareBulkLoadResponse> done) {
    try {
      getAccessController().prePrepareBulkLoad(env);
      String bulkToken = createStagingDir(baseStagingDir,
          getActiveUser(), ProtobufUtil.toTableName(request.getTableName())).toString();
      done.run(PrepareBulkLoadResponse.newBuilder().setBulkToken(bulkToken).build());
    } catch (IOException e) {
      ResponseConverter.setControllerException(controller, e);
      // respond exactly once; the controller carries the exception on failure
      done.run(null);
    }
  }

  @Override
  public void cleanupBulkLoad(RpcController controller,
                              CleanupBulkLoadRequest request,
                              RpcCallback<CleanupBulkLoadResponse> done) {
    try {
      getAccessController().preCleanupBulkLoad(env);
      fs.delete(createStagingDir(baseStagingDir,
          getActiveUser(),
          new Path(request.getBulkToken()).getName()),
          true);
      done.run(CleanupBulkLoadResponse.newBuilder().build());
    } catch (IOException e) {
      ResponseConverter.setControllerException(controller, e);
      // respond exactly once; the controller carries the exception on failure
      done.run(null);
    }
  }

  @Override
  public void secureBulkLoadHFiles(RpcController controller,
                                   SecureBulkLoadHFilesRequest request,
                                   RpcCallback<SecureBulkLoadHFilesResponse> done) {
    final List<Pair<byte[], String>> familyPaths = new ArrayList<Pair<byte[], String>>();
    for (ClientProtos.BulkLoadHFileRequest.FamilyPath el : request.getFamilyPathList()) {
      familyPaths.add(new Pair<byte[], String>(el.getFamily().toByteArray(), el.getPath()));
    }
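    // Rebuild the delegation token for the client's source filesystem
    // from the fields shipped in the request.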
    Token userToken = null;
    if (userProvider.isHadoopSecurityEnabled()) {
      userToken = new Token(request.getFsToken().getIdentifier().toByteArray(),
                            request.getFsToken().getPassword().toByteArray(),
                            new Text(request.getFsToken().getKind()),
                            new Text(request.getFsToken().getService()));
    }
    final String bulkToken = request.getBulkToken();
    User user = getActiveUser();
    final UserGroupInformation ugi = user.getUGI();
    if (userToken != null) {
      ugi.addToken(userToken);
    } else if (userProvider.isHadoopSecurityEnabled()) {
      // a missing token is only allowed to pass through in "simple" security mode,
      // e.g. for mini cluster testing; in secure mode it is an error
      ResponseConverter.setControllerException(controller,
          new DoNotRetryIOException("User token cannot be null"));
      return;
    }

    HRegion region = env.getRegion();
    boolean bypass = false;
    if (region.getCoprocessorHost() != null) {
      try {
        bypass = region.getCoprocessorHost().preBulkLoadHFile(familyPaths);
      } catch (IOException e) {
        ResponseConverter.setControllerException(controller, e);
        done.run(null);
        return;
      }
    }
    boolean loaded = false;
    if (!bypass) {
      // Get the target fs (HBase region server fs) delegation token.
      // Since we have checked the permission via 'preBulkLoadHFile', now let's give
      // the 'request user' the necessary token to operate on the target fs.
      // After this point the 'doAs' user will hold two tokens: one for the source fs
      // ('request user'), another for the target fs (HBase region server principal).
      FsDelegationToken targetfsDelegationToken = new FsDelegationToken(userProvider, "renewer");
      try {
        targetfsDelegationToken.acquireDelegationToken(fs);
      } catch (IOException e) {
        ResponseConverter.setControllerException(controller, e);
        done.run(null);
        return;
      }
      Token<?> targetFsToken = targetfsDelegationToken.getUserToken();
      if (targetFsToken != null && (userToken == null
          || !targetFsToken.getService().equals(userToken.getService()))) {
        ugi.addToken(targetFsToken);
      }
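
      // Execute the load as the requesting user, with both tokens attached,
      // so the moves into the staging directory succeed on both filesystems.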
      loaded = ugi.doAs(new PrivilegedAction<Boolean>() {
        @Override
        public Boolean run() {
          FileSystem fs = null;
          try {
            Configuration conf = env.getConfiguration();
            fs = FileSystem.get(conf);
            for (Pair<byte[], String> el : familyPaths) {
              Path p = new Path(el.getSecond());
              LOG.trace("Setting permission for: " + p);
              fs.setPermission(p, PERM_ALL_ACCESS);

              Path stageFamily = new Path(bulkToken, Bytes.toString(el.getFirst()));
              if (!fs.exists(stageFamily)) {
                fs.mkdirs(stageFamily);
                fs.setPermission(stageFamily, PERM_ALL_ACCESS);
              }
            }
            // Call bulkLoadHFiles as the requesting user
            // to enable access prior to staging.
            return env.getRegion().bulkLoadHFiles(familyPaths, true,
                new SecureBulkLoadListener(fs, bulkToken, conf));
          } catch (Exception e) {
            LOG.error("Failed to complete bulk load", e);
          }
          return false;
        }
      });
    }
    if (region.getCoprocessorHost() != null) {
      try {
        loaded = region.getCoprocessorHost().postBulkLoadHFile(familyPaths, loaded);
      } catch (IOException e) {
        ResponseConverter.setControllerException(controller, e);
        done.run(null);
        return;
      }
    }
    done.run(SecureBulkLoadHFilesResponse.newBuilder().setLoaded(loaded).build());
  }

  private AccessController getAccessController() {
    return (AccessController) this.env.getRegion()
        .getCoprocessorHost().findCoprocessor(AccessController.class.getName());
  }

  private Path createStagingDir(Path baseDir,
                                User user,
                                TableName tableName) throws IOException {
    String tblName = tableName.getNameAsString().replace(":", "_");
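    // Secret directory name: <user>__<table>__<64 random base-32 characters>.
    // As noted in the class comment, the randomness is the security boundary.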
    String randomDir = user.getShortName() + "__" + tblName + "__" +
        (new BigInteger(RANDOM_WIDTH, random).toString(RANDOM_RADIX));
    return createStagingDir(baseDir, user, randomDir);
  }

  private Path createStagingDir(Path baseDir,
                                User user,
                                String randomDir) throws IOException {
    Path p = new Path(baseDir, randomDir);
    fs.mkdirs(p, PERM_ALL_ACCESS);
    fs.setPermission(p, PERM_ALL_ACCESS);
    return p;
  }

  private User getActiveUser() {
    User user = RequestContext.getRequestUser();
    if (!RequestContext.isInRequestContext()) {
      return null;
    }

    // this is for testing
    if ("simple".equalsIgnoreCase(conf.get(User.HBASE_SECURITY_CONF_KEY))) {
      return User.createUserForTesting(conf, user.getShortName(), new String[]{});
    }

    return user;
  }

  @Override
  public Service getService() {
    return this;
  }

  private static class SecureBulkLoadListener implements HRegion.BulkLoadListener {
    // Target filesystem
    private FileSystem fs;
    private String stagingDir;
    private Configuration conf;
    // Source filesystem
    private FileSystem srcFs = null;

    public SecureBulkLoadListener(FileSystem fs, String stagingDir, Configuration conf) {
      this.fs = fs;
      this.stagingDir = stagingDir;
      this.conf = conf;
    }

    @Override
    public String prepareBulkLoad(final byte[] family, final String srcPath) throws IOException {
      Path p = new Path(srcPath);
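      // Files are staged under <stagingDir>/<family>/<hfile name>.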
      Path stageP = new Path(stagingDir, new Path(Bytes.toString(family), p.getName()));
      if (srcFs == null) {
        srcFs = FileSystem.get(p.toUri(), conf);
      }

      if (!isFile(p)) {
        throw new IOException("Path does not reference a file: " + p);
      }

      // Check to see if the source and target filesystems are the same
      if (!FSHDFSUtils.isSameHdfs(conf, srcFs, fs)) {
        LOG.debug("Bulk-load file " + srcPath + " is on a different filesystem than " +
            "the destination filesystem. Copying file over to destination staging dir.");
        FileUtil.copy(srcFs, p, fs, stageP, false, conf);
      } else {
        LOG.debug("Moving " + p + " to " + stageP);
        if (!fs.rename(p, stageP)) {
          throw new IOException("Failed to move HFile: " + p + " to " + stageP);
        }
      }
      return stageP.toString();
    }

    @Override
    public void doneBulkLoad(byte[] family, String srcPath) throws IOException {
      LOG.debug("Bulk Load done for: " + srcPath);
    }

    @Override
    public void failedBulkLoad(final byte[] family, final String srcPath) throws IOException {
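      // Roll back: move the staged file back to its original source location.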
      Path p = new Path(srcPath);
      Path stageP = new Path(stagingDir,
          new Path(Bytes.toString(family), p.getName()));
      LOG.debug("Moving " + stageP + " back to " + p);
      if (!fs.rename(stageP, p)) {
        throw new IOException("Failed to move HFile: " + stageP + " to " + p);
      }
    }

    /**
     * Check if the path references a file.
     * This is mainly needed to avoid symlinks.
     * @param p path to check
     * @return true if p is a file
     * @throws IOException if the file status cannot be read
     */
    private boolean isFile(Path p) throws IOException {
      FileStatus status = srcFs.getFileStatus(p);
      boolean isFile = !status.isDirectory();
      try {
        isFile = isFile && !(Boolean)Methods.call(FileStatus.class, status, "isSymlink", null, null);
      } catch (Exception e) {
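        // FileStatus.isSymlink() may not exist on older Hadoop versions
        // (hence the reflective call); treat a failure as "not a symlink".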
      }
      return isFile;
    }
  }
}