/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.mapreduce;

import java.io.IOException;
import org.apache.hadoop.hbase.client.Mutation;
import org.apache.hadoop.io.Writable;
import org.apache.yetus.audience.InterfaceAudience;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

/**
 * Convenience class that simply writes all values (which must be
 * {@link org.apache.hadoop.hbase.client.Put Put} or {@link org.apache.hadoop.hbase.client.Delete
 * Delete} instances) passed to it out to the configured HBase table. This works in combination
 * with {@link TableOutputFormat}, which actually does the writing to HBase.
 * <p>
 * Keys are passed along but ignored in TableOutputFormat. However, they can be used to control
 * how your values will be divided up amongst the specified number of reducers.
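 * For example, one way to exercise that control is to hand the job a partitioner via the
 * <code>initTableReducerJob</code> overload that also takes a partitioner class, e.g. the
 * {@link HRegionPartitioner} shipped with HBase, which groups mutations by the target region
 * of their row key:
 * <blockquote><code>
 * TableMapReduceUtil.initTableReducerJob("table", IdentityTableReducer.class, job,
 *   HRegionPartitioner.class);
 * </code></blockquote>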
 * <p>
 * You can also use the {@link TableMapReduceUtil} class to set up the two classes in one step:
 * <blockquote><code>
 * TableMapReduceUtil.initTableReducerJob("table", IdentityTableReducer.class, job);
 * </code></blockquote> This will also set the proper {@link TableOutputFormat}, which is given
 * the <code>table</code> parameter. The {@link org.apache.hadoop.hbase.client.Put Put} or
 * {@link org.apache.hadoop.hbase.client.Delete Delete} instances define the row and columns
 * implicitly.
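 * <p>
 * A fuller end-to-end setup might look like the following sketch; the table names, the
 * <code>Scan</code>, and <code>MyTableMapper</code> are placeholder assumptions, not part of
 * this class:
 *
 * <pre>
 * Configuration conf = HBaseConfiguration.create();
 * Job job = Job.getInstance(conf, "identity-reducer-example");
 * job.setJarByClass(MyTableMapper.class);
 * // Map phase: read from the source table, emitting row keys and Put/Delete mutations.
 * TableMapReduceUtil.initTableMapperJob("sourceTable", new Scan(), MyTableMapper.class,
 *   ImmutableBytesWritable.class, Mutation.class, job);
 * // Reduce phase: pass the mutations through unchanged to the target table.
 * TableMapReduceUtil.initTableReducerJob("targetTable", IdentityTableReducer.class, job);
 * System.exit(job.waitForCompletion(true) ? 0 : 1);
 * </pre>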
 */
@InterfaceAudience.Public
public class IdentityTableReducer extends TableReducer<Writable, Mutation, Writable> {

  @SuppressWarnings("unused")
  private static final Logger LOG = LoggerFactory.getLogger(IdentityTableReducer.class);

  /**
   * Writes each given record, consisting of the row key and the given values, to the configured
   * {@link org.apache.hadoop.mapreduce.OutputFormat}. The row key is emitted together with each
   * {@link org.apache.hadoop.hbase.client.Put Put} or {@link org.apache.hadoop.hbase.client.Delete
   * Delete} as a separate pair.
   * @param key     The current row key.
   * @param values  The {@link org.apache.hadoop.hbase.client.Put Put} or
   *                {@link org.apache.hadoop.hbase.client.Delete Delete} list for the given row.
   * @param context The context of the reduce.
   * @throws IOException          When writing the record fails.
   * @throws InterruptedException When the job gets interrupted.
   */
  @Override
  public void reduce(Writable key, Iterable<Mutation> values, Context context)
    throws IOException, InterruptedException {
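    // Identity pass-through: forward each mutation unchanged; TableOutputFormat
    // performs the actual write to the table.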
    for (Mutation putOrDelete : values) {
      context.write(key, putOrDelete);
    }
  }
}