/*
 * Copyright The Apache Software Foundation
 *
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.io.hfile;

import java.io.DataInputStream;
import java.io.DataOutput;
import java.io.IOException;
import java.io.OutputStream;
import java.nio.ByteBuffer;

import org.apache.hadoop.hbase.nio.ByteBuff;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.yetus.audience.InterfaceAudience;
/**
 * Various types of HFile blocks. Ordinal values of these enum constants must not be relied upon.
 * The values in the enum appear in the order they appear in a version 2 HFile.
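 * <p>Typical use, given some {@code DataInputStream in} (an assumed
 * caller-supplied stream, shown for illustration) positioned at the start of a block:
 * <pre>
 * BlockType bt = BlockType.read(in); // throws IOException on an unknown magic
 * </pre>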
 */
@InterfaceAudience.Private
public enum BlockType {

  // Scanned block section

  /** Data block, both versions */
  DATA("DATABLK*", BlockCategory.DATA),

  /** An encoded data block (e.g. with prefix compression), version 2 */
  ENCODED_DATA("DATABLKE", BlockCategory.DATA) {
    @Override
    public int getId() {
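      // Encoded data blocks deliberately report the same id as unencoded
      // DATA blocks, so the two are indistinguishable to callers of getId().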
      return DATA.ordinal();
    }
  },

  /** Version 2 leaf index block. Appears in the data block section */
  LEAF_INDEX("IDXLEAF2", BlockCategory.INDEX),

  /** Bloom filter block, version 2 */
  BLOOM_CHUNK("BLMFBLK2", BlockCategory.BLOOM),

  // Non-scanned block section

  /** Meta blocks */
  META("METABLKc", BlockCategory.META),

  /** Intermediate-level version 2 index in the non-data block section */
  INTERMEDIATE_INDEX("IDXINTE2", BlockCategory.INDEX),

  // Load-on-open section

  /** Root index block, also used for the single-level meta index, version 2 */
  ROOT_INDEX("IDXROOT2", BlockCategory.INDEX),

  /** File info, version 2 */
  FILE_INFO("FILEINF2", BlockCategory.META),

  /** General Bloom filter metadata, version 2 */
  GENERAL_BLOOM_META("BLMFMET2", BlockCategory.BLOOM),

  /** Delete Family Bloom filter metadata, version 2 */
  DELETE_FAMILY_BLOOM_META("DFBLMET2", BlockCategory.BLOOM),

  // Trailer

  /** Fixed file trailer, both versions (always just a magic string) */
  TRAILER("TRABLK\"$", BlockCategory.META),

  // Legacy blocks

  /** Block index magic string in version 1 */
  INDEX_V1("IDXBLK)+", BlockCategory.INDEX);
  public enum BlockCategory {
    DATA, META, INDEX, BLOOM, ALL_CATEGORIES, UNKNOWN;

    /**
     * Throws an exception if this category is the special category meaning
     * "all categories".
     */
    public void expectSpecific() {
      if (this == ALL_CATEGORIES) {
        throw new IllegalArgumentException(
            "Expected a specific block category but got " + this);
      }
    }
  }

  /** Every magic string is exactly this many bytes. */
  public static final int MAGIC_LENGTH = 8;

  /** The on-disk magic record identifying this block type. */
  private final byte[] magic;

  /** The category this block type is accounted under (see {@link #getCategory()}). */
  private final BlockCategory metricCat;

  private BlockType(String magicStr, BlockCategory metricCat) {
    magic = Bytes.toBytes(magicStr);
    this.metricCat = metricCat;
    assert magic.length == MAGIC_LENGTH;
  }

  /**
   * Use this instead of {@link #ordinal()}. They work exactly the same, except
   * DATA and ENCODED_DATA get the same id using this method (overridden for
   * {@link #ENCODED_DATA}).
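   * <pre>
   * // For example, the following always holds:
   * assert BlockType.DATA.getId() == BlockType.ENCODED_DATA.getId();
   * </pre>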
   * @return block type id from 0 to the number of block types - 1
   */
  public int getId() {
    // Default implementation, can be overridden for individual enum members.
    return ordinal();
  }

  /** Writes this block type's magic record to the given output stream. */
  public void writeToStream(OutputStream out) throws IOException {
    out.write(magic);
  }

  /** Writes this block type's magic record to the given {@link DataOutput}. */
  public void write(DataOutput out) throws IOException {
    out.write(magic);
  }

  /** Puts this block type's magic record into the given {@link ByteBuffer}. */
  public void write(ByteBuffer buf) {
    buf.put(magic);
  }

  /** Puts this block type's magic record into the given {@link ByteBuff}. */
  public void write(ByteBuff buf) {
    buf.put(magic);
  }

  /** Returns the {@link BlockCategory} this block type belongs to. */
  public BlockCategory getCategory() {
    return metricCat;
  }

  /**
   * Parses a magic record of length {@link #MAGIC_LENGTH} at the given offset
   * in the byte array and returns the matching block type.
   * @throws IOException if the length is not {@link #MAGIC_LENGTH} or no
   *         block type matches the magic record
   */
  public static BlockType parse(byte[] buf, int offset, int length)
      throws IOException {
    if (length != MAGIC_LENGTH) {
      throw new IOException("Magic record of invalid length: "
          + Bytes.toStringBinary(buf, offset, length));
    }

    for (BlockType blockType : values()) {
      if (Bytes.compareTo(blockType.magic, 0, MAGIC_LENGTH, buf, offset,
          MAGIC_LENGTH) == 0) {
        return blockType;
      }
    }

    throw new IOException("Invalid HFile block magic: "
        + Bytes.toStringBinary(buf, offset, MAGIC_LENGTH));
  }

  public static BlockType read(DataInputStream in) throws IOException {
    byte[] buf = new byte[MAGIC_LENGTH];
    in.readFully(buf);
    return parse(buf, 0, buf.length);
  }

  public static BlockType read(ByteBuff buf) throws IOException {
    byte[] magicBuf = new byte[Math.min(buf.limit() - buf.position(), MAGIC_LENGTH)];
    buf.get(magicBuf);
    BlockType blockType = parse(magicBuf, 0, magicBuf.length);
    // parse() throws unless magicBuf is exactly MAGIC_LENGTH bytes long, so
    // reaching this point means exactly MAGIC_LENGTH bytes were read.
    return blockType;
  }

  /**
   * Put the magic record out to the specified byte array position.
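   * <pre>
   * // For example, writing two consecutive magic records into a scratch
   * // buffer (illustrative only):
   * byte[] b = new byte[2 * BlockType.MAGIC_LENGTH];
   * int off = BlockType.DATA.put(b, 0);
   * off = BlockType.META.put(b, off); // off is now 16
   * </pre>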
   *
   * @param bytes the byte array
   * @param offset position in the array
   * @return incremented offset
   */
  public int put(byte[] bytes, int offset) {
    System.arraycopy(magic, 0, bytes, offset, MAGIC_LENGTH);
    return offset + MAGIC_LENGTH;
  }

  /**
   * Reads a magic record of the length {@link #MAGIC_LENGTH} from the given
   * stream and expects it to match this block type.
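   * <pre>
   * // For example, to verify that a stream is positioned at a root index
   * // block (assuming some DataInputStream in):
   * BlockType.ROOT_INDEX.readAndCheck(in);
   * </pre>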
   */
  public void readAndCheck(DataInputStream in) throws IOException {
    byte[] buf = new byte[MAGIC_LENGTH];
    in.readFully(buf);
    if (Bytes.compareTo(buf, magic) != 0) {
      throw new IOException("Invalid magic: expected "
          + Bytes.toStringBinary(magic) + ", got " + Bytes.toStringBinary(buf));
    }
  }

  /**
   * Reads a magic record of the length {@link #MAGIC_LENGTH} from the given
   * byte buffer and expects it to match this block type.
   */
  public void readAndCheck(ByteBuffer in) throws IOException {
    byte[] buf = new byte[MAGIC_LENGTH];
    in.get(buf);
    if (Bytes.compareTo(buf, magic) != 0) {
      throw new IOException("Invalid magic: expected "
          + Bytes.toStringBinary(magic) + ", got " + Bytes.toStringBinary(buf));
    }
  }

  /**
   * @return whether this block type is an encoded or unencoded data block
   */
  public final boolean isData() {
    return this == DATA || this == ENCODED_DATA;
  }

}