/**
 *
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.io;

import java.io.BufferedInputStream;
import java.io.DataInput;
import java.io.DataInputStream;
import java.io.IOException;
import java.io.InputStream;

import com.google.protobuf.HBaseZeroCopyByteString;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
import org.apache.hadoop.hbase.protobuf.generated.FSProtos;
import org.apache.hadoop.hbase.util.Bytes;
/**
 * A reference to the top or bottom half of a store file, where 'bottom' is the first half
 * of the file containing the keys that sort lowest and 'top' is the second half
 * of the file with keys that sort greater than those of the bottom half.  The file referenced
 * lives under a different region.  References are made at region split time.
 *
 * <p>References work with a special half store file type.  References know how
 * to write out the reference format in the file system and are what is juggled
 * when references are mixed in with direct store files.  The half store file
 * type is used when reading the referred-to file.
 *
 * <p>References to store files located in some other region look like
 * this in the file system:
 * <code>1278437856009925445.3323223323</code>,
 * i.e. an id followed by the hash of the referenced region.
 * Note that a region is itself not splittable if it has instances of store file
 * references.  References are cleaned up by compactions.
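 *
 * <p>A minimal usage sketch; the <code>fs</code>, <code>splitRow</code> and
 * <code>referencePath</code> names below are placeholders, not part of this class:
 * <pre>
 *   Reference top = Reference.createTopReference(splitRow);
 *   top.write(fs, referencePath);                  // persisted as pb with a magic prefix
 *   Reference readBack = Reference.read(fs, referencePath);
 * </pre>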
 */
@InterfaceAudience.Private
public class Reference {
  private byte [] splitkey;
  private Range region;

  /**
   * For split HStoreFiles, specifies whether the file covers the lower half or
   * the upper half of the key range.
   */
  static enum Range {
    /** HStoreFile contains upper half of key range */
    top,
    /** HStoreFile contains lower half of key range */
    bottom
  }

  /**
   * @param splitRow Row to split around.
   * @return A {@link Reference} that points at the top half of an hfile.
   */
  public static Reference createTopReference(final byte [] splitRow) {
    return new Reference(splitRow, Range.top);
  }

  /**
   * @param splitRow Row to split around.
   * @return A {@link Reference} that points at the bottom half of an hfile.
   */
  public static Reference createBottomReference(final byte [] splitRow) {
    return new Reference(splitRow, Range.bottom);
  }

  /**
   * Constructor
   * @param splitRow Row we are splitting around.
   * @param fr The half of the split this Reference covers.
   */
  Reference(final byte [] splitRow, final Range fr) {
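    // The split key is stored as the key of the first possible KeyValue on the split row,
    // so it compares correctly against full KeyValue keys in the referenced file.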
    this.splitkey = splitRow == null? null: KeyValue.createFirstOnRow(splitRow).getKey();
    this.region = fr;
  }

  /**
   * Used by serializations.
   */
  @Deprecated
  // Make this private when it comes time to let go of this constructor.  Needed by pb serialization.
  public Reference() {
    this(null, Range.bottom);
  }

  /**
   * @return Range
   */
  public Range getFileRegion() {
    return this.region;
  }

  /**
   * @return splitKey
   */
  public byte [] getSplitKey() {
    return splitkey;
  }

  /**
   * @see java.lang.Object#toString()
   */
  @Override
  public String toString() {
    return "" + this.region;
  }

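  /** @return True if the passed Range <code>r</code> is the top Range. */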
  public static boolean isTopFileRegion(final Range r) {
    return r.equals(Range.top);
  }

  /**
   * @deprecated Writables are going away. Use the pb serialization methods instead.
   * Remove in a release after 0.96 goes out.  This is here only to migrate
   * old Reference files written with Writables before 0.96.
   */
  @Deprecated
  public void readFields(DataInput in) throws IOException {
    boolean tmp = in.readBoolean();
    // If true, set region to top.
    this.region = tmp? Range.top: Range.bottom;
    this.splitkey = Bytes.readByteArray(in);
  }

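  /**
   * Write this Reference to the file system as a pb-serialized file at <code>p</code>.
   * The file is created with overwrite disabled, so this fails if <code>p</code> already exists.
   * @return The passed <code>p</code>.
   */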
  public Path write(final FileSystem fs, final Path p)
  throws IOException {
    FSDataOutputStream out = fs.create(p, false);
    try {
      out.write(toByteArray());
    } finally {
      out.close();
    }
    return p;
  }

  /**
   * Read a Reference from FileSystem.
   * @param fs FileSystem to read from.
   * @param p Path to the Reference file.
   * @return New Reference made from passed <code>p</code>
   * @throws IOException
   */
  public static Reference read(final FileSystem fs, final Path p)
  throws IOException {
    InputStream in = fs.open(p);
    try {
      // I need to be able to move back in the stream if this is not a pb serialization so I can
      // do the Writable decoding instead.
      in = in.markSupported()? in: new BufferedInputStream(in);
      int pblen = ProtobufUtil.lengthOfPBMagic();
      in.mark(pblen);
      byte [] pbuf = new byte[pblen];
      int read = in.read(pbuf);
      if (read != pblen) throw new IOException("read=" + read + ", wanted=" + pblen);
      // WATCHOUT! Return in middle of function!!!
      if (ProtobufUtil.isPBMagicPrefix(pbuf)) return convert(FSProtos.Reference.parseFrom(in));
      // Else presume Writables.  Need to reset the stream since it didn't start w/ pb.
      // We won't bother rewriting the Reference as a pb since Reference is transitory.
      in.reset();
      Reference r = new Reference();
      DataInputStream dis = new DataInputStream(in);
      // Set in = dis so it gets the close below in the finally on our way out.
      in = dis;
      r.readFields(dis);
      return r;
    } finally {
      in.close();
    }
  }

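  /** @return This Reference as an {@link FSProtos.Reference} protobuf message. */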
  FSProtos.Reference convert() {
    FSProtos.Reference.Builder builder = FSProtos.Reference.newBuilder();
    builder.setRange(isTopFileRegion(getFileRegion())?
      FSProtos.Reference.Range.TOP: FSProtos.Reference.Range.BOTTOM);
    builder.setSplitkey(HBaseZeroCopyByteString.wrap(getSplitKey()));
    return builder.build();
  }

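  /** @return A {@link Reference} made from the passed protobuf <code>r</code>. */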
  static Reference convert(final FSProtos.Reference r) {
    Reference result = new Reference();
    result.splitkey = r.getSplitkey().toByteArray();
    result.region = r.getRange() == FSProtos.Reference.Range.TOP? Range.top: Range.bottom;
    return result;
  }

  /**
   * Use this when writing to a stream and you want to use the pb mergeDelimitedFrom
   * (without the delimiter, pb reads to EOF, which may not be what you want).
   * @return This instance serialized as a protobuf with a magic pb prefix.
   * @throws IOException
   */
  byte [] toByteArray() throws IOException {
    return ProtobufUtil.prependPBMagic(convert().toByteArray());
  }
}