/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.hadoop.hbase.codec.prefixtree.encode.column;

import java.io.IOException;
import java.io.OutputStream;
import java.util.ArrayList;
import java.util.List;

import org.apache.hadoop.hbase.classification.InterfaceAudience;
import org.apache.hadoop.hbase.codec.prefixtree.PrefixTreeBlockMeta;
import org.apache.hadoop.hbase.codec.prefixtree.encode.other.ColumnNodeType;
import org.apache.hadoop.hbase.codec.prefixtree.encode.tokenize.Tokenizer;
import org.apache.hadoop.hbase.codec.prefixtree.encode.tokenize.TokenizerNode;
import org.apache.hadoop.hbase.util.CollectionUtils;
import org.apache.hadoop.hbase.util.vint.UFIntTool;

import com.google.common.collect.Lists;

/**
 * Takes the tokenized family or qualifier data and flattens it into a stream of bytes. The family
 * section is written after the row section, and the qualifier section after the family section.
 * <p/>
 * The family and qualifier tries, or "column tries", are structured differently from the row trie.
 * The trie cannot be reassembled without external data about the offsets of the leaf nodes, and
 * these external pointers are stored in the nubs and leaves of the row trie. For each cell in a
 * row, the row trie contains a list of offsets into the column sections (along with pointers to
 * timestamps and other per-cell fields). These offsets point to the last column node/token that
 * comprises the column name. To assemble the column name, the trie is traversed in reverse (right
 * to left), with the rightmost tokens pointing to the start of their "parent" node, which is the
 * node to the left.
 * <p/>
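 * For illustration, suppose a row contains qualifiers {@code cq1} and {@code cq2}. The tokenizer
 * produces one branch node for the shared token {@code cq} and two leaf nodes for the trailing
 * {@code 1} and {@code 2}. Conceptually (byte widths simplified; real offsets are FInt-encoded):
 * <pre>
 * offset 0: token "cq", parentOffset 0  (root)
 * offset 3: token "1",  parentOffset 0  (leaf, referenced from the row trie)
 * offset 5: token "2",  parentOffset 0  (leaf, referenced from the row trie)
 * </pre>
 * To reassemble {@code cq2}: start at offset 5, read token {@code 2}, follow parentOffset to the
 * node at 0, and prepend its token {@code cq}.
 * <p/>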
49   * This choice was made to reduce the size of the column trie by storing the minimum amount of
50   * offset data. As a result, to find a specific qualifier within a row, you must do a binary search
51   * of the column nodes, reassembling each one as you search. Future versions of the PrefixTree might
52   * encode the columns in both a forward and reverse trie, which would convert binary searches into
53   * more efficient trie searches which would be beneficial for wide rows.
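 * <p/>
 * A minimal usage sketch (the surrounding encoder normally drives these calls; the variable
 * names are illustrative):
 * <pre>{@code
 * ColumnSectionWriter writer =
 *     new ColumnSectionWriter(blockMeta, qualifierTokenizer, ColumnNodeType.QUALIFIER);
 * writer.compile();       // assign offsets and compute the section size
 * writer.writeBytes(out); // flush the column nodes to the block stream
 * int sectionBytes = writer.getNumBytes();
 * }</pre>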
 */
@InterfaceAudience.Private
public class ColumnSectionWriter {

  public static final int EXPECTED_NUBS_PLUS_LEAVES = 100;

  /****************** fields ****************************/

  private PrefixTreeBlockMeta blockMeta;

  private ColumnNodeType nodeType;
  private Tokenizer tokenizer;
  private int numBytes = 0;
  private ArrayList<TokenizerNode> nonLeaves;
  private ArrayList<TokenizerNode> leaves;
  private ArrayList<TokenizerNode> allNodes;
  private ArrayList<ColumnNodeWriter> columnNodeWriters;
  private List<Integer> outputArrayOffsets;


  /*********************** construct *********************/

  public ColumnSectionWriter() {
    this.nonLeaves = Lists.newArrayList();
    this.leaves = Lists.newArrayList();
    this.outputArrayOffsets = Lists.newArrayList();
  }

  public ColumnSectionWriter(PrefixTreeBlockMeta blockMeta, Tokenizer builder,
      ColumnNodeType nodeType) {
    this(); // init collections
    reconstruct(blockMeta, builder, nodeType);
  }

  public void reconstruct(PrefixTreeBlockMeta blockMeta, Tokenizer builder,
      ColumnNodeType nodeType) {
    this.blockMeta = blockMeta;
    this.tokenizer = builder;
    this.nodeType = nodeType;
  }

  public void reset() {
    numBytes = 0;
    nonLeaves.clear();
    leaves.clear();
    outputArrayOffsets.clear();
  }


  /****************** methods *******************************/

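  /**
   * Records the max element length for this node type in the block meta, then flattens the
   * tokenized nodes into their final layout via {@link #compilerInternals()}.
   */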
  public ColumnSectionWriter compile() {
    if (this.nodeType == ColumnNodeType.FAMILY) {
      // do nothing; max family length is fixed at Byte.MAX_VALUE
    } else if (this.nodeType == ColumnNodeType.QUALIFIER) {
      blockMeta.setMaxQualifierLength(tokenizer.getMaxElementLength());
    } else {
      blockMeta.setMaxTagsLength(tokenizer.getMaxElementLength());
    }
    compilerInternals();
    return this;
  }

  protected void compilerInternals() {
    tokenizer.setNodeFirstInsertionIndexes();
    tokenizer.appendNodes(nonLeaves, true, false);

    tokenizer.appendNodes(leaves, false, true);

    allNodes = Lists.newArrayListWithCapacity(nonLeaves.size() + leaves.size());
    allNodes.addAll(nonLeaves);
    allNodes.addAll(leaves);

    columnNodeWriters = Lists.newArrayListWithCapacity(CollectionUtils.nullSafeSize(allNodes));
    for (int i = 0; i < allNodes.size(); ++i) {
      TokenizerNode node = allNodes.get(i);
      columnNodeWriters.add(new ColumnNodeWriter(blockMeta, node, this.nodeType));
    }

    // leaf widths are known at this point, so add them up
    int totalBytesWithoutOffsets = 0;
    for (int i = allNodes.size() - 1; i >= 0; --i) {
      ColumnNodeWriter columnNodeWriter = columnNodeWriters.get(i);
      // leaves store all but their first token byte
      totalBytesWithoutOffsets += columnNodeWriter.getWidthUsingPlaceholderForOffsetWidth(0);
    }

    // figure out how wide our offset FInts are
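    // For example: with 300 token bytes and 50 nodes, a 1-byte offset yields
    // 300 + 1*50 = 350 bytes, more than the 255 a single UFInt byte can address;
    // a 2-byte offset yields 300 + 2*50 = 400 bytes, well within the 65535 limit,
    // so a width of 2 is chosen.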
    int parentOffsetWidth = 0;
    while (true) {
      ++parentOffsetWidth;
      int numBytesFinder = totalBytesWithoutOffsets + parentOffsetWidth * allNodes.size();
      if (numBytesFinder < UFIntTool.maxValueForNumBytes(parentOffsetWidth)) {
        numBytes = numBytesFinder;
        break; // it fits
      }
    }
    if (this.nodeType == ColumnNodeType.FAMILY) {
      blockMeta.setFamilyOffsetWidth(parentOffsetWidth);
    } else if (this.nodeType == ColumnNodeType.QUALIFIER) {
      blockMeta.setQualifierOffsetWidth(parentOffsetWidth);
    } else {
      blockMeta.setTagsOffsetWidth(parentOffsetWidth);
    }

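    // Assign each node its final position in the output array. Non-leaves were
    // appended ahead of leaves (and parents ahead of their children), so a
    // parent's offset is already set when a child reads it below.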
    int forwardIndex = 0;
    for (int i = 0; i < allNodes.size(); ++i) {
      TokenizerNode node = allNodes.get(i);
      ColumnNodeWriter columnNodeWriter = columnNodeWriters.get(i);
      int fullNodeWidth = columnNodeWriter
          .getWidthUsingPlaceholderForOffsetWidth(parentOffsetWidth);
      node.setOutputArrayOffset(forwardIndex);
      columnNodeWriter.setTokenBytes(node.getToken());
      if (node.isRoot()) {
        columnNodeWriter.setParentStartPosition(0);
      } else {
        columnNodeWriter.setParentStartPosition(node.getParent().getOutputArrayOffset());
      }
      forwardIndex += fullNodeWidth;
    }

    tokenizer.appendOutputArrayOffsets(outputArrayOffsets);
  }

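  /**
   * Writes each compiled column node, in order, to the output stream.
   */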
  public void writeBytes(OutputStream os) throws IOException {
    for (ColumnNodeWriter columnNodeWriter : columnNodeWriters) {
      columnNodeWriter.writeBytes(os);
    }
  }


  /************* get/set **************************/

  public ArrayList<ColumnNodeWriter> getColumnNodeWriters() {
    return columnNodeWriters;
  }

  public int getNumBytes() {
    return numBytes;
  }

  public int getOutputArrayOffset(int sortedIndex) {
    return outputArrayOffsets.get(sortedIndex);
  }

  public ArrayList<TokenizerNode> getNonLeaves() {
    return nonLeaves;
  }

  public ArrayList<TokenizerNode> getLeaves() {
    return leaves;
  }

}