/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.hadoop.hbase.codec.prefixtree.encode.column;

import java.io.IOException;
import java.io.OutputStream;
import java.util.ArrayList;
import java.util.List;

import org.apache.hadoop.hbase.classification.InterfaceAudience;
import org.apache.hadoop.hbase.codec.prefixtree.PrefixTreeBlockMeta;
import org.apache.hadoop.hbase.codec.prefixtree.encode.other.ColumnNodeType;
import org.apache.hadoop.hbase.codec.prefixtree.encode.tokenize.Tokenizer;
import org.apache.hadoop.hbase.codec.prefixtree.encode.tokenize.TokenizerNode;
import org.apache.hadoop.hbase.util.CollectionUtils;
import org.apache.hadoop.hbase.util.vint.UFIntTool;

import com.google.common.collect.Lists;

/**
 * <p>
 * Takes the tokenized family or qualifier data and flattens it into a stream of bytes. The family
 * section is written after the row section, and the qualifier section after the family section.
 * </p>
 * <p>
 * The family and qualifier tries, or "column tries", are structured differently from the row trie.
 * A column trie cannot be reassembled without external data about the offsets of its leaf nodes,
 * and these external pointers are stored in the nubs and leaves of the row trie. For each cell in
 * a row, the row trie contains a list of offsets into the column sections (along with pointers to
 * timestamps and other per-cell fields). These offsets point to the last column node/token that
 * comprises the column name. To assemble the column name, the trie is traversed in reverse (right
 * to left), with the rightmost tokens pointing to the start of their "parent" node, which is the
 * node to the left.
 * </p>
 * <p>
 * This choice was made to reduce the size of the column trie by storing the minimum amount of
 * offset data. As a result, to find a specific qualifier within a row, you must do a binary search
 * of the column nodes, reassembling each one as you search. Future versions of the PrefixTree
 * might encode the columns in both a forward and a reverse trie, which would replace the binary
 * searches with more efficient trie searches, a benefit for wide rows.
 * </p>
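 * <p>
 * For illustration (a hypothetical layout, not the exact binary format), consider qualifiers
 * "a", "abc", and "axy" tokenized into a root node "a" with child nodes "bc" and "xy":
 * <pre>
 *   offset 0: token "a"   parentOffset 0 (root)
 *   offset 3: token "bc"  parentOffset 0
 *   offset 8: token "xy"  parentOffset 0
 * </pre>
 * A cell with qualifier "axy" stores offset 8 in the row trie. The reader jumps to the node at
 * offset 8, reads token "xy", follows the parent offset back to "a", and concatenates the tokens
 * in reverse order to recover "axy". The offsets shown are invented; real node widths depend on
 * the token length and the offset width chosen in {@link #compile()}.
 * </p>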
 */
@InterfaceAudience.Private
public class ColumnSectionWriter {

  public static final int EXPECTED_NUBS_PLUS_LEAVES = 100;

  /****************** fields ****************************/

  private PrefixTreeBlockMeta blockMeta;

  private ColumnNodeType nodeType;
  private Tokenizer tokenizer;
  private int numBytes = 0;
  private ArrayList<TokenizerNode> nonLeaves;
  private ArrayList<TokenizerNode> leaves;
  private ArrayList<TokenizerNode> allNodes;
  private ArrayList<ColumnNodeWriter> columnNodeWriters;
  private List<Integer> outputArrayOffsets;


  /*********************** construct *********************/

  public ColumnSectionWriter() {
    this.nonLeaves = Lists.newArrayList();
    this.leaves = Lists.newArrayList();
    this.outputArrayOffsets = Lists.newArrayList();
  }

  public ColumnSectionWriter(PrefixTreeBlockMeta blockMeta, Tokenizer builder,
      ColumnNodeType nodeType) {
    this(); // init collections
    reconstruct(blockMeta, builder, nodeType);
  }

  public void reconstruct(PrefixTreeBlockMeta blockMeta, Tokenizer builder,
      ColumnNodeType nodeType) {
    this.blockMeta = blockMeta;
    this.tokenizer = builder;
    this.nodeType = nodeType;
  }

  public void reset() {
    numBytes = 0;
    nonLeaves.clear();
    leaves.clear();
    outputArrayOffsets.clear();
  }
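
  /*
   * A sketch of the expected call sequence (variable names here are illustrative,
   * not part of this class):
   *
   *   ColumnSectionWriter writer = new ColumnSectionWriter();
   *   writer.reconstruct(blockMeta, qualifierTokenizer, ColumnNodeType.QUALIFIER);
   *   writer.compile();                 // lay out nodes and size the parent offsets
   *   writer.writeBytes(outputStream);  // emit the compiled section
   *   writer.reset();                   // clear state for reuse on the next block
   */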


  /****************** methods *******************************/

  public ColumnSectionWriter compile() {
    if (this.nodeType == ColumnNodeType.FAMILY) {
      // do nothing. max family length fixed at Byte.MAX_VALUE
    } else if (this.nodeType == ColumnNodeType.QUALIFIER) {
      blockMeta.setMaxQualifierLength(tokenizer.getMaxElementLength());
    } else {
      blockMeta.setMaxTagsLength(tokenizer.getMaxElementLength());
    }
    compilerInternals();
    return this;
  }

  protected void compilerInternals() {
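    // First pass: collect the nodes, non-leaves before leaves, so that every
    // parent lands to the left of its children; each child can then point
    // backwards at its parent's start position, matching the right-to-left
    // traversal described in the class comment.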
    tokenizer.setNodeFirstInsertionIndexes();
    tokenizer.appendNodes(nonLeaves, true, false);

    tokenizer.appendNodes(leaves, false, true);

    allNodes = Lists.newArrayListWithCapacity(nonLeaves.size() + leaves.size());
    allNodes.addAll(nonLeaves);
    allNodes.addAll(leaves);

    columnNodeWriters = Lists.newArrayListWithCapacity(CollectionUtils.nullSafeSize(allNodes));
    for (int i = 0; i < allNodes.size(); ++i) {
      TokenizerNode node = allNodes.get(i);
      columnNodeWriters.add(new ColumnNodeWriter(blockMeta, node, this.nodeType));
    }

    // leaf widths are known at this point, so add them up
    int totalBytesWithoutOffsets = 0;
    for (int i = allNodes.size() - 1; i >= 0; --i) {
      ColumnNodeWriter columnNodeWriter = columnNodeWriters.get(i);
      // leaves store all but their first token byte
      totalBytesWithoutOffsets += columnNodeWriter.getWidthUsingPlaceholderForOffsetWidth(0);
    }

    // figure out how wide our offset FInts are
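    // Each node ends with its parent's start position encoded as a fixed-width
    // unsigned int (UFInt). Widening the UFInt by one byte grows every node, so
    // the offset width and the total section size must be solved together: try
    // widths of 1, 2, ... bytes until the resulting section size fits within
    // the range that width can address (1 byte reaches offset 255, 2 bytes
    // reach 65535, and so on).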
    int parentOffsetWidth = 0;
    while (true) {
      ++parentOffsetWidth;
      int numBytesFinder = totalBytesWithoutOffsets + parentOffsetWidth * allNodes.size();
      if (numBytesFinder < UFIntTool.maxValueForNumBytes(parentOffsetWidth)) {
        // it fits
        numBytes = numBytesFinder;
        break;
      }
    }
    if (this.nodeType == ColumnNodeType.FAMILY) {
      blockMeta.setFamilyOffsetWidth(parentOffsetWidth);
    } else if (this.nodeType == ColumnNodeType.QUALIFIER) {
      blockMeta.setQualifierOffsetWidth(parentOffsetWidth);
    } else {
      blockMeta.setTagsOffsetWidth(parentOffsetWidth);
    }

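    // Second pass: walk the nodes in output order, recording where each node
    // will start and stamping each writer with its token bytes and its
    // parent's start position (the root points at offset 0).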
    int forwardIndex = 0;
    for (int i = 0; i < allNodes.size(); ++i) {
      TokenizerNode node = allNodes.get(i);
      ColumnNodeWriter columnNodeWriter = columnNodeWriters.get(i);
      int fullNodeWidth = columnNodeWriter
          .getWidthUsingPlaceholderForOffsetWidth(parentOffsetWidth);
      node.setOutputArrayOffset(forwardIndex);
      columnNodeWriter.setTokenBytes(node.getToken());
      if (node.isRoot()) {
        columnNodeWriter.setParentStartPosition(0);
      } else {
        columnNodeWriter.setParentStartPosition(node.getParent().getOutputArrayOffset());
      }
      forwardIndex += fullNodeWidth;
    }

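    // Record, in sorted order, the output offsets of the nodes that terminate
    // column names; the row trie stores these so each cell can locate its
    // column (see getOutputArrayOffset below).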
    tokenizer.appendOutputArrayOffsets(outputArrayOffsets);
  }

  public void writeBytes(OutputStream os) throws IOException {
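    // nodes are emitted in the exact order compilerInternals() laid them out,
    // so the precomputed parent start positions remain valid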
    for (ColumnNodeWriter columnNodeWriter : columnNodeWriters) {
      columnNodeWriter.writeBytes(os);
    }
  }


  /************* get/set **************************/

  public ArrayList<ColumnNodeWriter> getColumnNodeWriters() {
    return columnNodeWriters;
  }

  public int getNumBytes() {
    return numBytes;
  }

  public int getOutputArrayOffset(int sortedIndex) {
    return outputArrayOffsets.get(sortedIndex);
  }

  public ArrayList<TokenizerNode> getNonLeaves() {
    return nonLeaves;
  }

  public ArrayList<TokenizerNode> getLeaves() {
    return leaves;
  }

}