
/**
 *
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.mapreduce;

import java.io.IOException;
import java.util.HashMap;
import java.util.Map;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.classification.InterfaceAudience;
import org.apache.hadoop.hbase.classification.InterfaceStability;
import org.apache.hadoop.hbase.client.BufferedMutator;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Delete;
import org.apache.hadoop.hbase.client.Durability;
import org.apache.hadoop.hbase.client.Mutation;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.mapreduce.JobContext;
import org.apache.hadoop.mapreduce.OutputCommitter;
import org.apache.hadoop.mapreduce.OutputFormat;
import org.apache.hadoop.mapreduce.RecordWriter;
import org.apache.hadoop.mapreduce.TaskAttemptContext;

/**
 * <p>
 * Hadoop output format that writes to one or more HBase tables. The key is
 * taken to be the table name while the output value <em>must</em> be either a
 * {@link Put} or a {@link Delete} instance. All tables must already exist, and
 * all Puts and Deletes must reference only valid column families.
 * </p>
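 * <p>
 * For example, a mapper may route each mutation to a table of its choosing. The
 * following is a sketch only; the table names, column family, qualifier and input
 * types below are purely illustrative:
 * </p>
 * <pre>
 * protected void map(LongWritable key, Text value, Context context)
 *     throws IOException, InterruptedException {
 *   byte[] row = Bytes.toBytes(value.toString());
 *
 *   Put put = new Put(row);
 *   put.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q"), row);
 *   context.write(new ImmutableBytesWritable(Bytes.toBytes("table_a")), put);
 *
 *   Delete delete = new Delete(row);
 *   context.write(new ImmutableBytesWritable(Bytes.toBytes("table_b")), delete);
 * }
 * </pre>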
 *
 * <p>
 * Write-ahead logging (WAL) for Puts can be disabled by setting
 * {@link #WAL_PROPERTY} to {@link #WAL_OFF}. Default value is {@link #WAL_ON}.
 * Note that disabling write-ahead logging is only appropriate for jobs where
 * loss of data due to region server failure can be tolerated (for example,
 * because it is easy to rerun a bulk import).
 * </p>
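 * <p>
 * A minimal driver sketch follows; the job name and the {@code MyImportMapper}
 * class are illustrative assumptions, not part of this API:
 * </p>
 * <pre>
 * Configuration conf = HBaseConfiguration.create();
 * // Trade durability for speed; only safe if the import can simply be rerun.
 * conf.setBoolean(MultiTableOutputFormat.WAL_PROPERTY, MultiTableOutputFormat.WAL_OFF);
 *
 * Job job = Job.getInstance(conf, "multi-table-import");
 * job.setJarByClass(MyImportMapper.class);
 * job.setMapperClass(MyImportMapper.class);
 * job.setOutputFormatClass(MultiTableOutputFormat.class);
 * job.setOutputKeyClass(ImmutableBytesWritable.class);
 * job.setOutputValueClass(Mutation.class);
 * job.setNumReduceTasks(0);
 * job.waitForCompletion(true);
 * </pre>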
 */
@InterfaceAudience.Public
@InterfaceStability.Stable
public class MultiTableOutputFormat extends OutputFormat<ImmutableBytesWritable, Mutation> {
  /** Set this to {@link #WAL_OFF} to turn off write-ahead logging (WAL) */
  public static final String WAL_PROPERTY = "hbase.mapreduce.multitableoutputformat.wal";
  /** Property value to use write-ahead logging */
  public static final boolean WAL_ON = true;
  /** Property value to disable write-ahead logging */
  public static final boolean WAL_OFF = false;
  /**
   * Record writer for outputting to multiple HTables.
   */
  protected static class MultiTableRecordWriter extends
      RecordWriter<ImmutableBytesWritable, Mutation> {
    private static final Log LOG = LogFactory.getLog(MultiTableRecordWriter.class);
    /** Shared cluster connection, created lazily on first use. */
    Connection connection;
    /** One {@link BufferedMutator} per target table, keyed by table name. */
    Map<ImmutableBytesWritable, BufferedMutator> mutatorMap = new HashMap<>();
    Configuration conf;
    boolean useWriteAheadLogging;

    /**
     * @param conf
     *          the HBase configuration to use
     * @param useWriteAheadLogging
     *          whether to use write-ahead logging. This can be turned off
     *          (<tt>false</tt>) to improve performance when bulk loading data.
     */
    public MultiTableRecordWriter(Configuration conf,
        boolean useWriteAheadLogging) throws IOException {
      LOG.debug("Created new MultiTableRecordWriter with WAL "
          + (useWriteAheadLogging ? "on" : "off"));
      this.conf = conf;
      this.useWriteAheadLogging = useWriteAheadLogging;
    }

    /**
     * @param tableName
     *          the name of the table to write to
     * @return the named mutator
     * @throws IOException
     *           if there is a problem opening a table
     */
    BufferedMutator getBufferedMutator(ImmutableBytesWritable tableName) throws IOException {
      // Lazily create a single shared connection, then cache one mutator per table.
      if (this.connection == null) {
        this.connection = ConnectionFactory.createConnection(conf);
      }
      if (!mutatorMap.containsKey(tableName)) {
        LOG.debug("Opening HTable \"" + Bytes.toString(tableName.get()) + "\" for writing");

        BufferedMutator mutator =
            connection.getBufferedMutator(TableName.valueOf(tableName.get()));
        mutatorMap.put(tableName, mutator);
      }
      return mutatorMap.get(tableName);
    }

    @Override
    public void close(TaskAttemptContext context) throws IOException {
      // Flush any buffered mutations for every table before releasing the shared connection.
      for (BufferedMutator mutator : mutatorMap.values()) {
        mutator.flush();
      }
      if (connection != null) {
        connection.close();
      }
    }

    /**
     * Writes an action (Put or Delete) to the specified table.
     *
     * @param tableName
     *          the table being updated.
     * @param action
     *          the update, either a put or a delete.
     * @throws IllegalArgumentException
     *          if the action is not a put or a delete.
     */
    @Override
    public void write(ImmutableBytesWritable tableName, Mutation action) throws IOException {
      BufferedMutator mutator = getBufferedMutator(tableName);
      // The actions are not immutable, so we defensively copy them
      if (action instanceof Put) {
        Put put = new Put((Put) action);
        // Honor the WAL setting for Puts; Deletes below keep their default durability.
        put.setDurability(useWriteAheadLogging ? Durability.SYNC_WAL
            : Durability.SKIP_WAL);
        mutator.mutate(put);
      } else if (action instanceof Delete) {
        Delete delete = new Delete((Delete) action);
        mutator.mutate(delete);
      } else {
        throw new IllegalArgumentException("action must be either Delete or Put");
      }
    }
  }

  @Override
  public void checkOutputSpecs(JobContext context) throws IOException,
      InterruptedException {
    // The target tables are not known until records are written, so there is nothing
    // to validate up front; a missing table only surfaces as a failure at write time.
  }

  @Override
  public OutputCommitter getOutputCommitter(TaskAttemptContext context)
      throws IOException, InterruptedException {
    return new TableOutputCommitter();
  }

  @Override
  public RecordWriter<ImmutableBytesWritable, Mutation> getRecordWriter(TaskAttemptContext context)
      throws IOException, InterruptedException {
    Configuration conf = context.getConfiguration();
    return new MultiTableRecordWriter(HBaseConfiguration.create(conf),
        conf.getBoolean(WAL_PROPERTY, WAL_ON));
  }

}