/**
 *
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.mapreduce;

import java.io.File;
import java.io.IOException;
import java.net.URL;
import java.net.URLDecoder;
import java.util.ArrayList;
import java.util.Enumeration;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.zip.ZipEntry;
import java.util.zip.ZipFile;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.MetaTableAccessor;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.classification.InterfaceAudience;
import org.apache.hadoop.hbase.classification.InterfaceStability;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
import org.apache.hadoop.hbase.protobuf.generated.ClientProtos;
import org.apache.hadoop.hbase.security.User;
import org.apache.hadoop.hbase.security.UserProvider;
import org.apache.hadoop.hbase.security.token.TokenUtil;
import org.apache.hadoop.hbase.util.Base64;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.zookeeper.ZKUtil;
import org.apache.hadoop.io.Writable;
import org.apache.hadoop.mapreduce.InputFormat;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.util.StringUtils;
import com.google.protobuf.InvalidProtocolBufferException;

/**
 * Utility for {@link TableMapper} and {@link TableReducer}
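 * <p>
 * A minimal usage sketch; {@code MyMapper} and the table names are
 * illustrative placeholders, not part of this API:
 * </p>
 * <pre>{@code
 * Configuration conf = HBaseConfiguration.create();
 * Job job = Job.getInstance(conf, "example");
 * job.setJarByClass(MyMapper.class);   // hypothetical mapper class
 * Scan scan = new Scan();
 * scan.setCaching(500);                // larger caching is typical for MR scans
 * TableMapReduceUtil.initTableMapperJob("sourceTable", scan,
 *     MyMapper.class, ImmutableBytesWritable.class, Put.class, job);
 * TableMapReduceUtil.initTableReducerJob("targetTable",
 *     IdentityTableReducer.class, job);
 * }</pre>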
 */
@SuppressWarnings({ "rawtypes", "unchecked" })
@InterfaceAudience.Public
@InterfaceStability.Stable
public class TableMapReduceUtil {
  static Log LOG = LogFactory.getLog(TableMapReduceUtil.class);

  /**
   * Use this before submitting a TableMap job. It will appropriately set up
   * the job.
   *
   * @param table The table name to read from.
   * @param scan The scan instance with the columns, time range etc.
   * @param mapper The mapper class to use.
   * @param outputKeyClass The class of the output key.
   * @param outputValueClass The class of the output value.
   * @param job The current job to adjust. Make sure the passed job is
   * carrying all necessary HBase configuration.
   * @throws IOException When setting up the details fails.
   */
  public static void initTableMapperJob(String table, Scan scan,
      Class<? extends TableMapper> mapper,
      Class<?> outputKeyClass,
      Class<?> outputValueClass, Job job)
      throws IOException {
    initTableMapperJob(table, scan, mapper, outputKeyClass, outputValueClass,
        job, true);
  }

  /**
   * Use this before submitting a TableMap job. It will appropriately set up
   * the job.
   *
   * @param table The table name to read from.
   * @param scan The scan instance with the columns, time range etc.
   * @param mapper The mapper class to use.
   * @param outputKeyClass The class of the output key.
   * @param outputValueClass The class of the output value.
   * @param job The current job to adjust. Make sure the passed job is
   * carrying all necessary HBase configuration.
   * @throws IOException When setting up the details fails.
   */
  public static void initTableMapperJob(TableName table,
      Scan scan,
      Class<? extends TableMapper> mapper,
      Class<?> outputKeyClass,
      Class<?> outputValueClass,
      Job job) throws IOException {
    initTableMapperJob(table.getNameAsString(),
        scan,
        mapper,
        outputKeyClass,
        outputValueClass,
        job,
        true);
  }

  /**
   * Use this before submitting a TableMap job. It will appropriately set up
   * the job.
   *
   * @param table Binary representation of the table name to read from.
   * @param scan The scan instance with the columns, time range etc.
   * @param mapper The mapper class to use.
   * @param outputKeyClass The class of the output key.
   * @param outputValueClass The class of the output value.
   * @param job The current job to adjust. Make sure the passed job is
   * carrying all necessary HBase configuration.
   * @throws IOException When setting up the details fails.
   */
  public static void initTableMapperJob(byte[] table, Scan scan,
      Class<? extends TableMapper> mapper,
      Class<?> outputKeyClass,
      Class<?> outputValueClass, Job job)
      throws IOException {
    initTableMapperJob(Bytes.toString(table), scan, mapper, outputKeyClass, outputValueClass,
        job, true);
  }

  /**
   * Use this before submitting a TableMap job. It will appropriately set up
   * the job.
   *
   * @param table The table name to read from.
   * @param scan The scan instance with the columns, time range etc.
   * @param mapper The mapper class to use.
   * @param outputKeyClass The class of the output key.
   * @param outputValueClass The class of the output value.
   * @param job The current job to adjust. Make sure the passed job is
   * carrying all necessary HBase configuration.
   * @param addDependencyJars upload HBase jars and jars for any of the configured
   * job classes via the distributed cache (tmpjars).
   * @param inputFormatClass the class of the input format
   * @throws IOException When setting up the details fails.
   */
  public static void initTableMapperJob(String table, Scan scan,
      Class<? extends TableMapper> mapper,
      Class<?> outputKeyClass,
      Class<?> outputValueClass, Job job,
      boolean addDependencyJars, Class<? extends InputFormat> inputFormatClass)
      throws IOException {
    initTableMapperJob(table, scan, mapper, outputKeyClass, outputValueClass, job,
        addDependencyJars, true, inputFormatClass);
  }

  /**
   * Use this before submitting a TableMap job. It will appropriately set up
   * the job.
   *
   * @param table The table name to read from.
   * @param scan The scan instance with the columns, time range etc.
   * @param mapper The mapper class to use.
   * @param outputKeyClass The class of the output key.
   * @param outputValueClass The class of the output value.
   * @param job The current job to adjust. Make sure the passed job is
   * carrying all necessary HBase configuration.
   * @param addDependencyJars upload HBase jars and jars for any of the configured
   * job classes via the distributed cache (tmpjars).
   * @param initCredentials whether to initialize hbase auth credentials for the job
   * @param inputFormatClass the input format
   * @throws IOException When setting up the details fails.
   */
  public static void initTableMapperJob(String table, Scan scan,
      Class<? extends TableMapper> mapper,
      Class<?> outputKeyClass,
      Class<?> outputValueClass, Job job,
      boolean addDependencyJars, boolean initCredentials,
      Class<? extends InputFormat> inputFormatClass)
      throws IOException {
    job.setInputFormatClass(inputFormatClass);
    if (outputValueClass != null) job.setMapOutputValueClass(outputValueClass);
    if (outputKeyClass != null) job.setMapOutputKeyClass(outputKeyClass);
    job.setMapperClass(mapper);
    if (Put.class.equals(outputValueClass)) {
      job.setCombinerClass(PutCombiner.class);
    }
    Configuration conf = job.getConfiguration();
    HBaseConfiguration.merge(conf, HBaseConfiguration.create(conf));
    conf.set(TableInputFormat.INPUT_TABLE, table);
    conf.set(TableInputFormat.SCAN, convertScanToString(scan));
    conf.setStrings("io.serializations", conf.get("io.serializations"),
        MutationSerialization.class.getName(), ResultSerialization.class.getName(),
        KeyValueSerialization.class.getName());
    if (addDependencyJars) {
      addDependencyJars(job);
    }
    if (initCredentials) {
      initCredentials(job);
    }
  }

  /**
   * Use this before submitting a TableMap job. It will appropriately set up
   * the job.
   *
   * @param table Binary representation of the table name to read from.
   * @param scan The scan instance with the columns, time range etc.
   * @param mapper The mapper class to use.
   * @param outputKeyClass The class of the output key.
   * @param outputValueClass The class of the output value.
   * @param job The current job to adjust. Make sure the passed job is
   * carrying all necessary HBase configuration.
   * @param addDependencyJars upload HBase jars and jars for any of the configured
   * job classes via the distributed cache (tmpjars).
   * @param inputFormatClass The class of the input format
   * @throws IOException When setting up the details fails.
   */
  public static void initTableMapperJob(byte[] table, Scan scan,
      Class<? extends TableMapper> mapper,
      Class<?> outputKeyClass,
      Class<?> outputValueClass, Job job,
      boolean addDependencyJars, Class<? extends InputFormat> inputFormatClass)
      throws IOException {
    initTableMapperJob(Bytes.toString(table), scan, mapper, outputKeyClass,
        outputValueClass, job, addDependencyJars, inputFormatClass);
  }

  /**
   * Use this before submitting a TableMap job. It will appropriately set up
   * the job.
   *
   * @param table Binary representation of the table name to read from.
   * @param scan The scan instance with the columns, time range etc.
   * @param mapper The mapper class to use.
   * @param outputKeyClass The class of the output key.
   * @param outputValueClass The class of the output value.
   * @param job The current job to adjust. Make sure the passed job is
   * carrying all necessary HBase configuration.
   * @param addDependencyJars upload HBase jars and jars for any of the configured
   * job classes via the distributed cache (tmpjars).
   * @throws IOException When setting up the details fails.
   */
  public static void initTableMapperJob(byte[] table, Scan scan,
      Class<? extends TableMapper> mapper,
      Class<?> outputKeyClass,
      Class<?> outputValueClass, Job job,
      boolean addDependencyJars)
      throws IOException {
    initTableMapperJob(Bytes.toString(table), scan, mapper, outputKeyClass,
        outputValueClass, job, addDependencyJars, TableInputFormat.class);
  }

  /**
   * Use this before submitting a TableMap job. It will appropriately set up
   * the job.
   *
   * @param table The table name to read from.
   * @param scan The scan instance with the columns, time range etc.
   * @param mapper The mapper class to use.
   * @param outputKeyClass The class of the output key.
   * @param outputValueClass The class of the output value.
   * @param job The current job to adjust. Make sure the passed job is
   * carrying all necessary HBase configuration.
   * @param addDependencyJars upload HBase jars and jars for any of the configured
   * job classes via the distributed cache (tmpjars).
   * @throws IOException When setting up the details fails.
   */
  public static void initTableMapperJob(String table, Scan scan,
      Class<? extends TableMapper> mapper,
      Class<?> outputKeyClass,
      Class<?> outputValueClass, Job job,
      boolean addDependencyJars)
      throws IOException {
    initTableMapperJob(table, scan, mapper, outputKeyClass,
        outputValueClass, job, addDependencyJars, TableInputFormat.class);
  }

  /**
   * Enable a basic on-heap cache for these jobs. Any BlockCache implementation based on
   * direct memory will likely cause the map tasks to OOM when opening the region. This
   * is done here instead of in TableSnapshotRegionRecordReader in case an advanced user
   * wants to override this behavior in their job.
   */
  public static void resetCacheConfig(Configuration conf) {
    conf.setFloat(
        HConstants.HFILE_BLOCK_CACHE_SIZE_KEY, HConstants.HFILE_BLOCK_CACHE_SIZE_DEFAULT);
    conf.setFloat(HConstants.BUCKET_CACHE_SIZE_KEY, 0f);
    conf.unset(HConstants.BUCKET_CACHE_IOENGINE_KEY);
  }

  /**
   * Sets up the job for reading from a table snapshot. It bypasses HBase servers
   * and reads directly from snapshot files.
   *
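   * <p>
   * A minimal sketch; the snapshot name and restore directory are illustrative:
   * </p>
   * <pre>{@code
   * Path restoreDir = new Path("/tmp/snapshot-restore");  // hypothetical path
   * TableMapReduceUtil.initTableSnapshotMapperJob("mySnapshot", new Scan(),
   *     MyMapper.class, ImmutableBytesWritable.class, Result.class, job,
   *     true, restoreDir);
   * }</pre>
   *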
   * @param snapshotName The name of the snapshot (of a table) to read from.
   * @param scan The scan instance with the columns, time range etc.
   * @param mapper The mapper class to use.
   * @param outputKeyClass The class of the output key.
   * @param outputValueClass The class of the output value.
   * @param job The current job to adjust. Make sure the passed job is
   * carrying all necessary HBase configuration.
   * @param addDependencyJars upload HBase jars and jars for any of the configured
   * job classes via the distributed cache (tmpjars).
   *
   * @param tmpRestoreDir a temporary directory to copy the snapshot files into. The current
   * user should have write permissions to this directory, and it should not be a subdirectory
   * of rootdir. After the job is finished, the restore directory can be deleted.
   * @throws IOException When setting up the details fails.
   * @see TableSnapshotInputFormat
   */
  public static void initTableSnapshotMapperJob(String snapshotName, Scan scan,
      Class<? extends TableMapper> mapper,
      Class<?> outputKeyClass,
      Class<?> outputValueClass, Job job,
      boolean addDependencyJars, Path tmpRestoreDir)
      throws IOException {
    TableSnapshotInputFormat.setInput(job, snapshotName, tmpRestoreDir);
    initTableMapperJob(snapshotName, scan, mapper, outputKeyClass,
        outputValueClass, job, addDependencyJars, false, TableSnapshotInputFormat.class);
    resetCacheConfig(job.getConfiguration());
  }

  /**
   * Use this before submitting a Multi TableMap job. It will appropriately set
   * up the job.
   *
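   * <p>
   * A minimal sketch (table names are illustrative). Each Scan carries the
   * table it targets via {@code Scan.SCAN_ATTRIBUTES_TABLE_NAME}, as
   * MultiTableInputFormat expects:
   * </p>
   * <pre>{@code
   * List<Scan> scans = new ArrayList<Scan>();
   * for (String tableName : new String[] { "table1", "table2" }) {
   *   Scan scan = new Scan();
   *   scan.setAttribute(Scan.SCAN_ATTRIBUTES_TABLE_NAME, Bytes.toBytes(tableName));
   *   scans.add(scan);
   * }
   * TableMapReduceUtil.initTableMapperJob(scans, MyMapper.class,
   *     ImmutableBytesWritable.class, Result.class, job);
   * }</pre>
   *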
   * @param scans The list of {@link Scan} objects to read from.
   * @param mapper The mapper class to use.
   * @param outputKeyClass The class of the output key.
   * @param outputValueClass The class of the output value.
   * @param job The current job to adjust. Make sure the passed job is carrying
   * all necessary HBase configuration.
   * @throws IOException When setting up the details fails.
   */
  public static void initTableMapperJob(List<Scan> scans,
      Class<? extends TableMapper> mapper,
      Class<?> outputKeyClass,
      Class<?> outputValueClass, Job job) throws IOException {
    initTableMapperJob(scans, mapper, outputKeyClass, outputValueClass, job,
        true);
  }

  /**
   * Use this before submitting a Multi TableMap job. It will appropriately set
   * up the job.
   *
   * @param scans The list of {@link Scan} objects to read from.
   * @param mapper The mapper class to use.
   * @param outputKeyClass The class of the output key.
   * @param outputValueClass The class of the output value.
   * @param job The current job to adjust. Make sure the passed job is carrying
   * all necessary HBase configuration.
   * @param addDependencyJars upload HBase jars and jars for any of the
   * configured job classes via the distributed cache (tmpjars).
   * @throws IOException When setting up the details fails.
   */
  public static void initTableMapperJob(List<Scan> scans,
      Class<? extends TableMapper> mapper,
      Class<?> outputKeyClass,
      Class<?> outputValueClass, Job job,
      boolean addDependencyJars) throws IOException {
    initTableMapperJob(scans, mapper, outputKeyClass, outputValueClass, job,
        addDependencyJars, true);
  }

  /**
   * Use this before submitting a Multi TableMap job. It will appropriately set
   * up the job.
   *
   * @param scans The list of {@link Scan} objects to read from.
   * @param mapper The mapper class to use.
   * @param outputKeyClass The class of the output key.
   * @param outputValueClass The class of the output value.
   * @param job The current job to adjust. Make sure the passed job is carrying
   * all necessary HBase configuration.
   * @param addDependencyJars upload HBase jars and jars for any of the
   * configured job classes via the distributed cache (tmpjars).
   * @param initCredentials whether to initialize hbase auth credentials for the job
   * @throws IOException When setting up the details fails.
   */
  public static void initTableMapperJob(List<Scan> scans,
      Class<? extends TableMapper> mapper,
      Class<?> outputKeyClass,
      Class<?> outputValueClass, Job job,
      boolean addDependencyJars,
      boolean initCredentials) throws IOException {
    job.setInputFormatClass(MultiTableInputFormat.class);
    if (outputValueClass != null) {
      job.setMapOutputValueClass(outputValueClass);
    }
    if (outputKeyClass != null) {
      job.setMapOutputKeyClass(outputKeyClass);
    }
    job.setMapperClass(mapper);
    Configuration conf = job.getConfiguration();
    HBaseConfiguration.merge(conf, HBaseConfiguration.create(conf));
    List<String> scanStrings = new ArrayList<String>();

    for (Scan scan : scans) {
      scanStrings.add(convertScanToString(scan));
    }
    job.getConfiguration().setStrings(MultiTableInputFormat.SCANS,
        scanStrings.toArray(new String[scanStrings.size()]));

    if (addDependencyJars) {
      addDependencyJars(job);
    }

    if (initCredentials) {
      initCredentials(job);
    }
  }

  public static void initCredentials(Job job) throws IOException {
    UserProvider userProvider = UserProvider.instantiate(job.getConfiguration());
    if (userProvider.isHadoopSecurityEnabled()) {
      // propagate delegation related props from launcher job to MR job
      if (System.getenv("HADOOP_TOKEN_FILE_LOCATION") != null) {
        job.getConfiguration().set("mapreduce.job.credentials.binary",
            System.getenv("HADOOP_TOKEN_FILE_LOCATION"));
      }
    }

    if (userProvider.isHBaseSecurityEnabled()) {
      try {
        // init credentials for remote cluster
        String quorumAddress = job.getConfiguration().get(TableOutputFormat.QUORUM_ADDRESS);
        User user = userProvider.getCurrent();
        if (quorumAddress != null) {
          Configuration peerConf = HBaseConfiguration.create(job.getConfiguration());
          ZKUtil.applyClusterKeyToConf(peerConf, quorumAddress);
          Connection peerConn = ConnectionFactory.createConnection(peerConf);
          try {
            TokenUtil.addTokenForJob(peerConn, user, job);
          } finally {
            peerConn.close();
          }
        }

        Connection conn = ConnectionFactory.createConnection(job.getConfiguration());
        try {
          TokenUtil.addTokenForJob(conn, user, job);
        } finally {
          conn.close();
        }
      } catch (InterruptedException ie) {
        LOG.info("Interrupted obtaining user authentication token");
        Thread.currentThread().interrupt();
      }
    }
  }

  /**
   * Obtain an authentication token, for the specified cluster, on behalf of the current user
   * and add it to the credentials for the given map reduce job.
   *
   * The quorumAddress is the key to the ZK ensemble, which contains:
   * hbase.zookeeper.quorum, hbase.zookeeper.client.port and zookeeper.znode.parent
   *
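   * <p>
   * For example, when the job also writes to a second secure cluster (the
   * ensemble hosts and znode path below are illustrative):
   * </p>
   * <pre>{@code
   * TableMapReduceUtil.initCredentialsForCluster(job,
   *     "zk1,zk2,zk3:2181:/hbase");
   * }</pre>
   *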
   * @param job The job that requires the permission.
   * @param quorumAddress string that contains the three required configurations
   * @throws IOException When the authentication token cannot be obtained.
   */
  public static void initCredentialsForCluster(Job job, String quorumAddress)
      throws IOException {
    UserProvider userProvider = UserProvider.instantiate(job.getConfiguration());
    if (userProvider.isHBaseSecurityEnabled()) {
      try {
        Configuration peerConf = HBaseConfiguration.create(job.getConfiguration());
        ZKUtil.applyClusterKeyToConf(peerConf, quorumAddress);
        Connection peerConn = ConnectionFactory.createConnection(peerConf);
        try {
          TokenUtil.addTokenForJob(peerConn, userProvider.getCurrent(), job);
        } finally {
          peerConn.close();
        }
      } catch (InterruptedException e) {
        LOG.info("Interrupted obtaining user authentication token");
        // restore the interrupt status instead of clearing it
        Thread.currentThread().interrupt();
      }
    }
  }

  /**
   * Writes the given scan into a Base64 encoded string.
   *
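   * <p>
   * A round trip through this and {@link #convertStringToScan(String)} yields
   * an equivalent Scan, which is how scans travel through the job
   * configuration, e.g.:
   * </p>
   * <pre>{@code
   * String encoded = convertScanToString(scan);
   * conf.set(TableInputFormat.SCAN, encoded);
   * Scan copy = convertStringToScan(encoded);  // equivalent to the original
   * }</pre>
   *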
   * @param scan The scan to write out.
   * @return The scan saved in a Base64 encoded string.
   * @throws IOException When writing the scan fails.
   */
  static String convertScanToString(Scan scan) throws IOException {
    ClientProtos.Scan proto = ProtobufUtil.toScan(scan);
    return Base64.encodeBytes(proto.toByteArray());
  }

  /**
   * Converts the given Base64 string back into a Scan instance.
   *
   * @param base64 The scan details.
   * @return The newly created Scan instance.
   * @throws IOException When reading the scan instance fails.
   */
  static Scan convertStringToScan(String base64) throws IOException {
    byte[] decoded = Base64.decode(base64);
    ClientProtos.Scan scan;
    try {
      scan = ClientProtos.Scan.parseFrom(decoded);
    } catch (InvalidProtocolBufferException ipbe) {
      throw new IOException(ipbe);
    }

    return ProtobufUtil.toScan(scan);
  }

  /**
   * Use this before submitting a TableReduce job. It will
   * appropriately set up the JobConf.
   *
   * @param table The output table.
   * @param reducer The reducer class to use.
   * @param job The current job to adjust.
   * @throws IOException When determining the region count fails.
   */
  public static void initTableReducerJob(String table,
      Class<? extends TableReducer> reducer, Job job)
      throws IOException {
    initTableReducerJob(table, reducer, job, null);
  }

  /**
   * Use this before submitting a TableReduce job. It will
   * appropriately set up the JobConf.
   *
   * @param table The output table.
   * @param reducer The reducer class to use.
   * @param job The current job to adjust.
   * @param partitioner Partitioner to use. Pass <code>null</code> to use
   * default partitioner.
   * @throws IOException When determining the region count fails.
   */
  public static void initTableReducerJob(String table,
      Class<? extends TableReducer> reducer, Job job,
      Class partitioner) throws IOException {
    initTableReducerJob(table, reducer, job, partitioner, null, null, null);
  }

  /**
   * Use this before submitting a TableReduce job. It will
   * appropriately set up the JobConf.
   *
   * @param table The output table.
   * @param reducer The reducer class to use.
   * @param job The current job to adjust. Make sure the passed job is
   * carrying all necessary HBase configuration.
   * @param partitioner Partitioner to use. Pass <code>null</code> to use
   * default partitioner.
   * @param quorumAddress Distant cluster to write to; default is null for
   * output to the cluster that is designated in <code>hbase-site.xml</code>.
   * Set this String to the zookeeper ensemble of an alternate remote cluster
   * when you would have the reduce write to a cluster that is other than the
   * default; e.g. copying tables between clusters, the source would be
   * designated by <code>hbase-site.xml</code> and this param would have the
   * ensemble address of the remote cluster. The format to pass is particular.
   * Pass <code>&lt;hbase.zookeeper.quorum&gt;:&lt;hbase.zookeeper.client.port&gt;:&lt;zookeeper.znode.parent&gt;
   * </code> such as <code>server,server2,server3:2181:/hbase</code>.
   * @param serverClass redefined hbase.regionserver.class
   * @param serverImpl redefined hbase.regionserver.impl
   * @throws IOException When determining the region count fails.
   */
  public static void initTableReducerJob(String table,
      Class<? extends TableReducer> reducer, Job job,
      Class partitioner, String quorumAddress, String serverClass,
      String serverImpl) throws IOException {
    initTableReducerJob(table, reducer, job, partitioner, quorumAddress,
        serverClass, serverImpl, true);
  }

  /**
   * Use this before submitting a TableReduce job. It will
   * appropriately set up the JobConf.
   *
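   * <p>
   * For example, to have the reduce side write to a remote cluster when
   * copying between clusters (the ensemble address is illustrative):
   * </p>
   * <pre>{@code
   * TableMapReduceUtil.initTableReducerJob("targetTable", null, job,
   *     null, "zk1,zk2,zk3:2181:/hbase", null, null, true);
   * }</pre>
   *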
   * @param table The output table.
   * @param reducer The reducer class to use.
   * @param job The current job to adjust. Make sure the passed job is
   * carrying all necessary HBase configuration.
   * @param partitioner Partitioner to use. Pass <code>null</code> to use
   * default partitioner.
   * @param quorumAddress Distant cluster to write to; default is null for
   * output to the cluster that is designated in <code>hbase-site.xml</code>.
   * Set this String to the zookeeper ensemble of an alternate remote cluster
   * when you would have the reduce write to a cluster that is other than the
   * default; e.g. copying tables between clusters, the source would be
   * designated by <code>hbase-site.xml</code> and this param would have the
   * ensemble address of the remote cluster. The format to pass is particular.
   * Pass <code>&lt;hbase.zookeeper.quorum&gt;:&lt;hbase.zookeeper.client.port&gt;:&lt;zookeeper.znode.parent&gt;
   * </code> such as <code>server,server2,server3:2181:/hbase</code>.
   * @param serverClass redefined hbase.regionserver.class
   * @param serverImpl redefined hbase.regionserver.impl
   * @param addDependencyJars upload HBase jars and jars for any of the configured
   * job classes via the distributed cache (tmpjars).
   * @throws IOException When determining the region count fails.
   */
  public static void initTableReducerJob(String table,
      Class<? extends TableReducer> reducer, Job job,
      Class partitioner, String quorumAddress, String serverClass,
      String serverImpl, boolean addDependencyJars) throws IOException {

    Configuration conf = job.getConfiguration();
    HBaseConfiguration.merge(conf, HBaseConfiguration.create(conf));
    job.setOutputFormatClass(TableOutputFormat.class);
    if (reducer != null) job.setReducerClass(reducer);
    conf.set(TableOutputFormat.OUTPUT_TABLE, table);
    conf.setStrings("io.serializations", conf.get("io.serializations"),
        MutationSerialization.class.getName(), ResultSerialization.class.getName());
    // If passed a quorum/ensemble address, pass it on to TableOutputFormat.
    if (quorumAddress != null) {
      // Calling this will validate the format
      ZKUtil.transformClusterKey(quorumAddress);
      conf.set(TableOutputFormat.QUORUM_ADDRESS, quorumAddress);
    }
    if (serverClass != null && serverImpl != null) {
      conf.set(TableOutputFormat.REGION_SERVER_CLASS, serverClass);
      conf.set(TableOutputFormat.REGION_SERVER_IMPL, serverImpl);
    }
    job.setOutputKeyClass(ImmutableBytesWritable.class);
    job.setOutputValueClass(Writable.class);
    if (partitioner == HRegionPartitioner.class) {
      job.setPartitionerClass(HRegionPartitioner.class);
      int regions = MetaTableAccessor.getRegionCount(conf, TableName.valueOf(table));
      if (job.getNumReduceTasks() > regions) {
        job.setNumReduceTasks(regions);
      }
    } else if (partitioner != null) {
      job.setPartitionerClass(partitioner);
    }

    if (addDependencyJars) {
      addDependencyJars(job);
    }

    initCredentials(job);
  }

  /**
   * Ensures that the given number of reduce tasks for the given job
   * configuration does not exceed the number of regions for the given table.
   *
   * @param table The table to get the region count for.
   * @param job The current job to adjust.
   * @throws IOException When retrieving the table details fails.
   */
  public static void limitNumReduceTasks(String table, Job job)
      throws IOException {
    int regions =
        MetaTableAccessor.getRegionCount(job.getConfiguration(), TableName.valueOf(table));
    if (job.getNumReduceTasks() > regions) {
      job.setNumReduceTasks(regions);
    }
  }

  /**
   * Sets the number of reduce tasks for the given job configuration to the
   * number of regions the given table has.
   *
   * @param table The table to get the region count for.
   * @param job The current job to adjust.
   * @throws IOException When retrieving the table details fails.
   */
  public static void setNumReduceTasks(String table, Job job)
      throws IOException {
    job.setNumReduceTasks(MetaTableAccessor.getRegionCount(job.getConfiguration(),
        TableName.valueOf(table)));
  }

  /**
   * Sets the number of rows to return and cache with each scanner iteration.
   * Higher caching values will enable faster mapreduce jobs at the expense of
   * requiring more heap to contain the cached rows.
   *
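   * <p>
   * For example, a modest increase over the default (the value is
   * illustrative; tune it to row size and available heap):
   * </p>
   * <pre>{@code
   * TableMapReduceUtil.setScannerCaching(job, 500);
   * }</pre>
   *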
   * @param job The current job to adjust.
   * @param batchSize The number of rows to return in batch with each scanner
   * iteration.
   */
  public static void setScannerCaching(Job job, int batchSize) {
    job.getConfiguration().setInt("hbase.client.scanner.caching", batchSize);
  }

  /**
   * Add HBase and its dependencies (only) to the job configuration.
   * <p>
   * This is intended as a low-level API, facilitating code reuse between this
   * class and its mapred counterpart. It is also of use to external tools that
   * need to build a MapReduce job that interacts with HBase but want
   * fine-grained control over the jars shipped to the cluster.
   * </p>
   * @param conf The Configuration object to extend with dependencies.
   * @see org.apache.hadoop.hbase.mapred.TableMapReduceUtil
   * @see <a href="https://issues.apache.org/jira/browse/PIG-3285">PIG-3285</a>
   */
  public static void addHBaseDependencyJars(Configuration conf) throws IOException {

    // PrefixTreeCodec is part of the hbase-prefix-tree module. If not included in MR jobs jar
    // dependencies, MR jobs that write encoded hfiles will fail.
    // We use reflection here to prevent a circular module dependency.
    // TODO - if we extract the MR into a module, make it depend on hbase-prefix-tree.
    Class prefixTreeCodecClass = null;
    try {
      prefixTreeCodecClass =
          Class.forName("org.apache.hadoop.hbase.codec.prefixtree.PrefixTreeCodec");
    } catch (ClassNotFoundException e) {
      // this will show up in unit tests but should not show in real deployments
      LOG.warn("The hbase-prefix-tree module jar containing PrefixTreeCodec is not present." +
          " Continuing without it.");
    }

    addDependencyJars(conf,
        // explicitly pull a class from each module
        org.apache.hadoop.hbase.HConstants.class, // hbase-common
        org.apache.hadoop.hbase.protobuf.generated.ClientProtos.class, // hbase-protocol
        org.apache.hadoop.hbase.client.Put.class, // hbase-client
        org.apache.hadoop.hbase.CompatibilityFactory.class, // hbase-hadoop-compat
        org.apache.hadoop.hbase.mapreduce.TableMapper.class, // hbase-server
        prefixTreeCodecClass, // hbase-prefix-tree (if null, will be skipped)
        // pull necessary dependencies
        org.apache.zookeeper.ZooKeeper.class,
        io.netty.channel.Channel.class,
        com.google.protobuf.Message.class,
        com.google.common.collect.Lists.class,
        org.apache.htrace.Trace.class,
        com.yammer.metrics.core.MetricsRegistry.class);
  }

  /**
   * Returns a classpath string built from the content of the "tmpjars" value in {@code conf}.
   * Also exposed to shell scripts via `bin/hbase mapredcp`.
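   * <p>
   * A sketch of how an external launcher might use it, assuming the HBase
   * dependencies were first staged into tmpjars via
   * {@link #addHBaseDependencyJars(Configuration)}:
   * </p>
   * <pre>{@code
   * Configuration conf = HBaseConfiguration.create();
   * TableMapReduceUtil.addHBaseDependencyJars(conf);
   * String cp = TableMapReduceUtil.buildDependencyClasspath(conf);
   * // append cp to the classpath handed to a spawned JVM
   * }</pre>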
   */
  public static String buildDependencyClasspath(Configuration conf) {
    if (conf == null) {
      throw new IllegalArgumentException("Must provide a configuration object.");
    }
    Set<String> paths = new HashSet<String>(conf.getStringCollection("tmpjars"));
    if (paths.size() == 0) {
      throw new IllegalArgumentException("Configuration contains no tmpjars.");
    }
    StringBuilder sb = new StringBuilder();
    for (String s : paths) {
      // entries can take the form 'file:/path/to/file.jar'.
      int idx = s.indexOf(":");
      if (idx != -1) s = s.substring(idx + 1);
      if (sb.length() > 0) sb.append(File.pathSeparator);
      sb.append(s);
    }
    return sb.toString();
  }

  /**
   * Add the HBase dependency jars as well as jars for any of the configured
   * job classes to the job configuration, so that JobClient will ship them
   * to the cluster and add them to the DistributedCache.
   */
  public static void addDependencyJars(Job job) throws IOException {
    addHBaseDependencyJars(job.getConfiguration());
    try {
      addDependencyJars(job.getConfiguration(),
          // when making changes here, consider also mapred.TableMapReduceUtil
          // pull job classes
          job.getMapOutputKeyClass(),
          job.getMapOutputValueClass(),
          job.getInputFormatClass(),
          job.getOutputKeyClass(),
          job.getOutputValueClass(),
          job.getOutputFormatClass(),
          job.getPartitionerClass(),
          job.getCombinerClass());
    } catch (ClassNotFoundException e) {
      throw new IOException(e);
    }
  }

  /**
   * Add the jars containing the given classes to the job's configuration
   * such that JobClient will ship them to the cluster and add them to
   * the DistributedCache.
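   * <p>
   * For example, to ship the jar that contains a custom filter used by the
   * job's scan ({@code MyFilter} is a hypothetical class):
   * </p>
   * <pre>{@code
   * TableMapReduceUtil.addDependencyJars(job.getConfiguration(), MyFilter.class);
   * }</pre>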
   */
  public static void addDependencyJars(Configuration conf,
      Class<?>... classes) throws IOException {

    FileSystem localFs = FileSystem.getLocal(conf);
    Set<String> jars = new HashSet<String>();
    // Add jars that are already in the tmpjars variable
    jars.addAll(conf.getStringCollection("tmpjars"));

    // add jars as we find them to a map of jar contents to jar name so that we can avoid
    // creating new jars for classes that have already been packaged.
    Map<String, String> packagedClasses = new HashMap<String, String>();

    // Add jars containing the specified classes
    for (Class<?> clazz : classes) {
      if (clazz == null) continue;

      Path path = findOrCreateJar(clazz, localFs, packagedClasses);
      if (path == null) {
        LOG.warn("Could not find jar for class " + clazz +
            " in order to ship it to the cluster.");
        continue;
      }
      if (!localFs.exists(path)) {
        LOG.warn("Could not validate jar file " + path + " for class "
            + clazz);
        continue;
      }
      jars.add(path.toString());
    }
    if (jars.isEmpty()) return;

    conf.set("tmpjars", StringUtils.arrayToString(jars.toArray(new String[jars.size()])));
  }

  /**
   * Finds the Jar for a class or creates it if it doesn't exist. If the class is in
   * a directory in the classpath, it creates a Jar on the fly with the
   * contents of the directory and returns the path to that Jar. If a Jar is
   * created, it is created in the system temporary directory. Otherwise,
   * returns an existing jar that contains a class of the same name. Maintains
   * a mapping from jar contents to the tmp jar created.
   * @param my_class the class to find.
   * @param fs the FileSystem with which to qualify the returned path.
   * @param packagedClasses a map of class name to path.
   * @return a jar file that contains the class.
   * @throws IOException
   */
  private static Path findOrCreateJar(Class<?> my_class, FileSystem fs,
      Map<String, String> packagedClasses)
      throws IOException {
    // attempt to locate an existing jar for the class.
    String jar = findContainingJar(my_class, packagedClasses);
    if (null == jar || jar.isEmpty()) {
      jar = getJar(my_class);
      updateMap(jar, packagedClasses);
    }

    if (null == jar || jar.isEmpty()) {
      return null;
    }

    LOG.debug(String.format("For class %s, using jar %s", my_class.getName(), jar));
    return new Path(jar).makeQualified(fs);
  }

  /**
   * Add entries to <code>packagedClasses</code> corresponding to class files
   * contained in <code>jar</code>.
   * @param jar The jar whose content to list.
   * @param packagedClasses map[class -> jar]
   */
  private static void updateMap(String jar, Map<String, String> packagedClasses)
      throws IOException {
    if (null == jar || jar.isEmpty()) {
      return;
    }
    ZipFile zip = null;
    try {
      zip = new ZipFile(jar);
      for (Enumeration<? extends ZipEntry> iter = zip.entries(); iter.hasMoreElements();) {
        ZipEntry entry = iter.nextElement();
        if (entry.getName().endsWith("class")) {
          packagedClasses.put(entry.getName(), jar);
        }
      }
    } finally {
      if (null != zip) zip.close();
    }
  }

  /**
   * Find a jar that contains a class of the same name, if any. It will return
   * a jar file, even if that is not the first thing on the class path that
   * has a class with the same name. Looks first on the classpath and then in
   * the <code>packagedClasses</code> map.
   * @param my_class the class to find.
   * @return a jar file that contains the class, or null.
   * @throws IOException
   */
  private static String findContainingJar(Class<?> my_class, Map<String, String> packagedClasses)
      throws IOException {
    ClassLoader loader = my_class.getClassLoader();

    String class_file = my_class.getName().replaceAll("\\.", "/") + ".class";

    if (loader != null) {
      // first search the classpath
      for (Enumeration<URL> itr = loader.getResources(class_file); itr.hasMoreElements();) {
        URL url = itr.nextElement();
        if ("jar".equals(url.getProtocol())) {
          String toReturn = url.getPath();
          if (toReturn.startsWith("file:")) {
            toReturn = toReturn.substring("file:".length());
          }
          // URLDecoder is a misnamed class, since it actually decodes
          // x-www-form-urlencoded MIME type rather than actual
          // URL encoding (which the file path has). Therefore it would
          // decode +s to ' 's which is incorrect (spaces are actually
          // either unencoded or encoded as "%20"). Replace +s first, so
          // that they are kept sacred during the decoding process.
          toReturn = toReturn.replaceAll("\\+", "%2B");
          toReturn = URLDecoder.decode(toReturn, "UTF-8");
          return toReturn.replaceAll("!.*$", "");
        }
      }
    }

    // now look in any jars we've packaged using JarFinder. Returns null when
    // no jar is found.
    return packagedClasses.get(class_file);
  }

  /**
   * Invoke 'getJar' on a custom JarFinder implementation. Useful for some job
   * configuration contexts (HBASE-8140) and also for testing on MRv2;
   * see HADOOP-9426.
   * @param my_class the class to find.
   * @return a jar file that contains the class, or null.
   */
  private static String getJar(Class<?> my_class) {
    String ret = null;
    try {
      ret = JarFinder.getJar(my_class);
    } catch (Exception e) {
      // wrap any failure (historically reflection-related) as unchecked
      throw new RuntimeException("getJar invocation failed.", e);
    }

    return ret;
  }
}