/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.mapreduce;

import java.io.IOException;
import java.nio.charset.StandardCharsets;
import java.util.List;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.mapreduce.Job;
import org.apache.yetus.audience.InterfaceAudience;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

/**
 * Creates a 3-level directory tree: the first level uses the table name as the parent directory,
 * the second level uses the column family name as the child directory, and all HFiles for one
 * family are placed under that child directory:
 *
 * <pre>
 * -tableName1
 *   -columnFamilyName1
 *   -columnFamilyName2
 *     -HFiles
 * -tableName2
 *   -columnFamilyName1
 *     -HFiles
 *   -columnFamilyName2
 * </pre>
 */
@InterfaceAudience.Public
public class MultiTableHFileOutputFormat extends HFileOutputFormat2 {
  private static final Logger LOG = LoggerFactory.getLogger(MultiTableHFileOutputFormat.class);

  /**
   * Creates a composite key to use as a mapper output key when using
   * MultiTableHFileOutputFormat.configureIncrementalLoad to set up a bulk ingest job
   * @param tableName Name of the Table - Eg: TableName.getNameAsString()
   * @param suffix    Usually represents a rowkey when creating a mapper key or column family
   * @return byte[] representation of composite key
   */
  public static byte[] createCompositeKey(byte[] tableName, byte[] suffix) {
    return combineTableNameSuffix(tableName, suffix);
  }

  /**
   * Alternate api which accepts an ImmutableBytesWritable for the suffix
   * @see MultiTableHFileOutputFormat#createCompositeKey(byte[], byte[])
   */
  public static byte[] createCompositeKey(byte[] tableName, ImmutableBytesWritable suffix) {
    return combineTableNameSuffix(tableName, suffix.get());
  }

  /**
   * Alternate api which accepts a String for the tableName and ImmutableBytesWritable for the
   * suffix
   * @see MultiTableHFileOutputFormat#createCompositeKey(byte[], byte[])
   */
  public static byte[] createCompositeKey(String tableName, ImmutableBytesWritable suffix) {
    // StandardCharsets.UTF_8 avoids the runtime charset lookup (and its unchecked
    // UnsupportedCharsetException path) that Charset.forName("UTF-8") performs.
    return combineTableNameSuffix(tableName.getBytes(StandardCharsets.UTF_8), suffix.get());
  }

  /**
   * Analogous to
   * {@link HFileOutputFormat2#configureIncrementalLoad(Job, TableDescriptor,
   * org.apache.hadoop.hbase.client.RegionLocator)}, this function will configure the requisite
   * number of reducers to write HFiles for multiple tables simultaneously
   * @param job                   See {@link org.apache.hadoop.mapreduce.Job}
   * @param multiTableDescriptors Table descriptor and region locator pairs
   * @throws IOException if the output could not be configured on the job
   */
  public static void configureIncrementalLoad(Job job, List<TableInfo> multiTableDescriptors)
    throws IOException {
    MultiTableHFileOutputFormat.configureIncrementalLoad(job, multiTableDescriptors,
      MultiTableHFileOutputFormat.class);
  }

  /**
   * Verifies that {@code keyBytes} contains the table-name/suffix separator and returns the
   * separator's index within the key.
   * @param keyBytes composite key produced by one of the createCompositeKey overloads
   * @return index of the separator within {@code keyBytes}
   * @throws IllegalArgumentException if no separator is present in the key
   */
  private static int validateCompositeKey(byte[] keyBytes) {

    int separatorIdx = Bytes.indexOf(keyBytes, tableSeparator);

    // Either the separator was not found or a tablename wasn't present or a key wasn't present
    if (separatorIdx == -1) {
      throw new IllegalArgumentException("Invalid format for composite key ["
        + Bytes.toStringBinary(keyBytes) + "]. Cannot extract tablename and suffix from key");
    }
    return separatorIdx;
  }

  /** Extracts the table name (the bytes before the separator) from a composite key. */
  protected static byte[] getTableName(byte[] keyBytes) {
    int separatorIdx = validateCompositeKey(keyBytes);
    return Bytes.copy(keyBytes, 0, separatorIdx);
  }

  /** Extracts the suffix (the bytes after the separator) from a composite key. */
  protected static byte[] getSuffix(byte[] keyBytes) {
    int separatorIdx = validateCompositeKey(keyBytes);
    // Skip the single-byte separator itself; copy everything after it.
    return Bytes.copy(keyBytes, separatorIdx + 1, keyBytes.length - separatorIdx - 1);
  }
}