/*
 * Licensed to the Apache Software Foundation (ASF) under one or more contributor license
 * agreements. See the NOTICE file distributed with this work for additional information regarding
 * copyright ownership. The ASF licenses this file to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance with the License. You may obtain a
 * copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable
 * law or agreed to in writing, software distributed under the License is distributed on an "AS IS"
 * BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License
 * for the specific language governing permissions and limitations under the License.
 */
package org.apache.hadoop.hbase.io.encoding;

import java.io.DataOutputStream;
import java.io.IOException;

import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.CellComparatorImpl;
import org.apache.hadoop.hbase.io.ByteArrayOutputStream;
import org.apache.yetus.audience.InterfaceAudience;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

/**
 * Writes cells with {@link NoneEncoder} while tracking, in memory, the offset at which each new
 * row starts. {@link #flush()} appends that row index (row count, per-row offsets, and the size
 * of the encoded cell data) after the cell data.
 */
@InterfaceAudience.Private
public class RowIndexEncoderV1 {
  private static final Logger LOG = LoggerFactory.getLogger(RowIndexEncoderV1.class);

  /** The Cell previously appended. */
  private Cell lastCell = null;

  private DataOutputStream out;
  private NoneEncoder encoder;
  private int startOffset = -1;
  private ByteArrayOutputStream rowsOffsetBAOS = new ByteArrayOutputStream(64 * 4);

  public RowIndexEncoderV1(DataOutputStream out, HFileBlockDefaultEncodingContext encodingCtx) {
    this.out = out;
    this.encoder = new NoneEncoder(out, encodingCtx);
  }

  public int write(Cell cell) throws IOException {
    // checkRow uses the comparator to check that we are writing in order.
    if (!checkRow(cell)) {
      // First cell of a new row: record its offset relative to the first cell written.
      if (startOffset < 0) {
        startOffset = out.size();
      }
      rowsOffsetBAOS.writeInt(out.size() - startOffset);
    }
    lastCell = cell;
    return encoder.write(cell);
  }

  /**
   * @return true if the cell belongs to the same row as the previously written cell
   */
  protected boolean checkRow(final Cell cell) throws IOException {
    boolean isDuplicateRow = false;
    if (cell == null) {
      throw new IOException("Key cannot be null or empty");
    }
    if (lastCell != null) {
      int keyComp = CellComparatorImpl.COMPARATOR.compareRows(lastCell, cell);
      if (keyComp > 0) {
        throw new IOException("Added a key not lexically larger than"
            + " previous. Current cell = " + cell + ", lastCell = " + lastCell);
      } else if (keyComp == 0) {
        isDuplicateRow = true;
      }
    }
    return isDuplicateRow;
  }

  public void flush() throws IOException {
    int onDiskDataSize = 0;
    if (startOffset >= 0) {
      onDiskDataSize = out.size() - startOffset;
    }
    // Append the row index after the cell data: number of rows, the offset of each row,
    // then the size of the encoded cell data that precedes the index.
    out.writeInt(rowsOffsetBAOS.size() / 4);
    if (rowsOffsetBAOS.size() > 0) {
      out.write(rowsOffsetBAOS.getBuffer(), 0, rowsOffsetBAOS.size());
    }
    out.writeInt(onDiskDataSize);
    if (LOG.isTraceEnabled()) {
      LOG.trace("RowNumber: " + rowsOffsetBAOS.size() / 4 + ", onDiskDataSize: " + onDiskDataSize
          + ", totalOnDiskSize: " + (out.size() - startOffset));
    }
  }

}