/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.mapreduce;

import java.io.IOException;
import java.util.Iterator;
import java.util.List;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.conf.Configured;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.input.TextInputFormat;
import org.apache.hadoop.util.Tool;
import org.apache.hadoop.util.ToolRunner;
import org.apache.yetus.audience.InterfaceAudience;

import org.apache.hbase.thirdparty.com.google.common.base.Splitter;
/**
 * Sample Uploader MapReduce
 * <p>
 * This is EXAMPLE code. You will need to change it to work for your context.
 * <p>
 * Writes {@link Put}s straight to the table from the map phase via {@link TableOutputFormat};
 * no reducer is used. Change the InputFormat to suit your data. In this example we are importing
 * a CSV file where each line has the form
 * <p>
 *
 * <pre>
 * row,family,qualifier,value
 * </pre>
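 * <p>
 * For instance, a line for a purely hypothetical row might look like:
 *
 * <pre>
 * row1,family1,qualifier1,value1
 * </pre>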
 * <p>
 * The table and column family to insert into must already exist.
 * <p>
 * There is no reducer in this example as it is not necessary and would add significant overhead.
 * If you need to do any massaging of data before inserting into HBase, you can do this in the map
 * as well.
 * <p>
 * Do the following to start the MR job:
 *
 * <pre>
 * ./bin/hadoop org.apache.hadoop.hbase.mapreduce.SampleUploader /tmp/input.csv TABLE_NAME
 * </pre>
 * <p>
 * This code was written against HBase 0.21 trunk.
 */
@InterfaceAudience.Private
public class SampleUploader extends Configured implements Tool {

  private static final String NAME = "SampleUploader";

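  /**
   * Mapper that parses each input line as {@code row,family,qualifier,value}, builds a
   * {@link Put} for that cell, and emits it keyed by the row. Lines that do not split into
   * exactly four fields are silently skipped.
   */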
  static class Uploader extends Mapper<LongWritable, Text, ImmutableBytesWritable, Put> {
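    // Update the task status every 'checkpoint' records so long-running maps show progress.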
    private long checkpoint = 100;
    private long count = 0;

    @Override
    public void map(LongWritable key, Text line, Context context) throws IOException {
      // Input is a CSV file
      // Each map() is a single line, where the key is the byte offset of the line in the file
      // Each line is comma-delimited; row,family,qualifier,value

      // Split CSV line
      List<String> values = Splitter.on(',').splitToList(line.toString());
      if (values.size() != 4) {
        return;
      }
      Iterator<String> i = values.iterator();
      // Extract each value
      byte[] row = Bytes.toBytes(i.next());
      byte[] family = Bytes.toBytes(i.next());
      byte[] qualifier = Bytes.toBytes(i.next());
      byte[] value = Bytes.toBytes(i.next());

      // Create Put
      Put put = new Put(row);
      put.addColumn(family, qualifier, value);
      // Uncomment below to skip writing to the WAL. This will improve performance but means
      // you may lose data if a RegionServer crashes. Requires importing
      // org.apache.hadoop.hbase.client.Durability.
      // put.setDurability(Durability.SKIP_WAL);

      try {
        context.write(new ImmutableBytesWritable(row), put);
      } catch (InterruptedException e) {
        // Restore the interrupt flag and fail the task rather than silently dropping the Put.
        Thread.currentThread().interrupt();
        throw new IOException(e);
      }

      // Set status every checkpoint lines
      if (++count % checkpoint == 0) {
        context.setStatus("Emitting Put " + count);
      }
    }
  }

  /**
   * Job configuration: reads the CSV file at {@code args[0]} and writes {@link Put}s directly to
   * the table named by {@code args[1]}, with no reduce phase.
   * @param conf the configuration to use
   * @param args the input path followed by the table name
   * @return the configured, not-yet-submitted job
   * @throws IOException when the job cannot be set up
   */
  public static Job configureJob(Configuration conf, String[] args) throws IOException {
    Path inputPath = new Path(args[0]);
    String tableName = args[1];
    Job job = Job.getInstance(conf, NAME + "_" + tableName);
    job.setJarByClass(Uploader.class);
    FileInputFormat.setInputPaths(job, inputPath);
    // Input is a plain-text CSV file, so read it one line at a time.
    job.setInputFormatClass(TextInputFormat.class);
    job.setMapperClass(Uploader.class);
    // No reducers. Just write straight to table. Call initTableReducerJob
    // because it sets up the TableOutputFormat.
    TableMapReduceUtil.initTableReducerJob(tableName, null, job);
    job.setNumReduceTasks(0);
    return job;
  }

  /**
   * Main entry point.
   * @param otherArgs the remaining command line arguments after {@link ToolRunner} has consumed
   *          the standard Hadoop options.
   * @throws Exception when running the job fails.
   */
  @Override
  public int run(String[] otherArgs) throws Exception {
    if (otherArgs.length != 2) {
      System.err.println("Wrong number of arguments: " + otherArgs.length);
      System.err.println("Usage: " + NAME + " <input> <tablename>");
      return -1;
    }
    Job job = configureJob(getConf(), otherArgs);
    return (job.waitForCompletion(true) ? 0 : 1);
  }

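  /**
   * Launches the tool through {@link ToolRunner} with a fresh HBase configuration and exits with
   * the job's status code.
   */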
  public static void main(String[] args) throws Exception {
    int status = ToolRunner.run(HBaseConfiguration.create(), new SampleUploader(), args);
    System.exit(status);
  }
}