/**
 *
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.regionserver.wal;

import java.io.DataInput;
import java.io.DataOutput;
import java.io.EOFException;
import java.io.IOException;
import java.util.Iterator;
import java.util.List;
import java.util.UUID;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.hbase.classification.InterfaceAudience;
import org.apache.hadoop.hbase.HBaseInterfaceAudience;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.wal.WALKey;
import org.apache.hadoop.io.Writable;
import org.apache.hadoop.io.WritableUtils;

import com.google.common.annotations.VisibleForTesting;

/**
 * A Key for an entry in the change log.
 *
 * The log intermingles edits to many tables and rows, so each log entry
 * identifies the appropriate table and row. Within a table and row, edits
 * are also sorted.
 *
 * <p>Some transactional edits (START, COMMIT, ABORT) will not have an
 * associated row.
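 *
 * <p>A minimal construction sketch (assumes {@code hri} is some existing
 * HRegionInfo; the table name and timestamp are illustrative only):
 * <pre>
 * byte[] region = hri.getEncodedNameAsBytes();
 * HLogKey key = new HLogKey(region, TableName.valueOf("t1"),
 *     System.currentTimeMillis());
 * </pre>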
 * @deprecated use {@link WALKey} instead
 */
@InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.REPLICATION)
@Deprecated
public class HLogKey extends WALKey implements Writable {
  public static final Log LOG = LogFactory.getLog(HLogKey.class);

  public HLogKey() {
    super();
  }

  @VisibleForTesting
  public HLogKey(final byte[] encodedRegionName, final TableName tablename, long logSeqNum,
      final long now, UUID clusterId) {
    super(encodedRegionName, tablename, logSeqNum, now, clusterId);
  }

  public HLogKey(final byte[] encodedRegionName, final TableName tablename) {
    super(encodedRegionName, tablename);
  }

  public HLogKey(final byte[] encodedRegionName, final TableName tablename, final long now) {
    super(encodedRegionName, tablename, now);
  }

  /**
   * Create the log key for writing to somewhere.
   * We maintain the tablename mainly for debugging purposes.
   * A regionName is always a sub-table object.
   * <p>Used by log splitting and snapshots.
   *
   * @param encodedRegionName Encoded name of the region as returned by
   * <code>HRegionInfo#getEncodedNameAsBytes()</code>.
   * @param tablename name of table
   * @param logSeqNum log sequence number
   * @param now Time at which this edit was written.
   * @param clusterIds the clusters that have consumed the change (used in Replication)
   * @param nonceGroup the nonce group of the edit
   * @param nonce the nonce of the edit
   */
  public HLogKey(final byte[] encodedRegionName, final TableName tablename,
      long logSeqNum, final long now, List<UUID> clusterIds, long nonceGroup, long nonce) {
    super(encodedRegionName, tablename, logSeqNum, now, clusterIds, nonceGroup, nonce);
  }

  /**
   * Create the log key for writing to somewhere.
   * We maintain the tablename mainly for debugging purposes.
   * A regionName is always a sub-table object.
   *
   * @param encodedRegionName Encoded name of the region as returned by
   * <code>HRegionInfo#getEncodedNameAsBytes()</code>.
   * @param tablename name of table
   * @param now Time at which this edit was written.
   * @param clusterIds the clusters that have consumed the change (used in Replication)
   * @param nonceGroup the nonce group of the edit
   * @param nonce the nonce of the edit
   */
  public HLogKey(final byte[] encodedRegionName, final TableName tablename,
      final long now, List<UUID> clusterIds, long nonceGroup, long nonce) {
    super(encodedRegionName, tablename, now, clusterIds, nonceGroup, nonce);
  }

  /**
   * Create the log key for writing to somewhere.
   * We maintain the tablename mainly for debugging purposes.
   * A regionName is always a sub-table object.
   *
   * @param encodedRegionName Encoded name of the region as returned by
   * <code>HRegionInfo#getEncodedNameAsBytes()</code>.
   * @param tablename name of table
   * @param logSeqNum log sequence number
   * @param nonceGroup the nonce group of the edit
   * @param nonce the nonce of the edit
   */
  public HLogKey(final byte[] encodedRegionName, final TableName tablename, long logSeqNum,
      long nonceGroup, long nonce) {
    super(encodedRegionName, tablename, logSeqNum, nonceGroup, nonce);
  }

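  /**
   * Serialize this key in the legacy Writable format; only expected to be
   * used by test code (see the warning logged below). The layout written
   * here is: a version vint, the encoded region name, the table name, the
   * log sequence number, the write time, and a boolean-prefixed first
   * cluster id, if present.
   */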
  @Override
  @Deprecated
  public void write(DataOutput out) throws IOException {
    LOG.warn("HLogKey is being serialized to writable - only expected in test code");
    WritableUtils.writeVInt(out, VERSION.code);
    if (compressionContext == null) {
      Bytes.writeByteArray(out, this.encodedRegionName);
      Bytes.writeByteArray(out, this.tablename.getName());
    } else {
      Compressor.writeCompressed(this.encodedRegionName, 0,
          this.encodedRegionName.length, out,
          compressionContext.regionDict);
      Compressor.writeCompressed(this.tablename.getName(), 0, this.tablename.getName().length, out,
          compressionContext.tableDict);
    }
    out.writeLong(this.logSeqNum);
    out.writeLong(this.writeTime);
    // Don't need to write the clusters information as we are using protobufs from 0.95
    // Writing only the first clusterId for testing the legacy read
    Iterator<UUID> iterator = clusterIds.iterator();
    if (iterator.hasNext()) {
      out.writeBoolean(true);
      UUID clusterId = iterator.next();
      out.writeLong(clusterId.getMostSignificantBits());
      out.writeLong(clusterId.getLeastSignificantBits());
    } else {
      out.writeBoolean(false);
    }
  }

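  /**
   * Deserialize a key written in the legacy Writable format, handling both
   * unversioned and versioned layouts by sniffing the sign of the leading
   * vint, as the comment below explains.
   */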
  @Override
  public void readFields(DataInput in) throws IOException {
    Version version = Version.UNVERSIONED;
    // HLogKey was not versioned in the beginning.
    // In order to introduce it now, we make use of the fact
    // that encodedRegionName was written with Bytes.writeByteArray,
    // which encodes the array length as a vint which is >= 0.
    // Hence if the vint is >= 0 we have an old version and the vint
    // encodes the length of encodedRegionName.
    // If < 0 we just read the version and the next vint is the length.
    // @see Bytes#readByteArray(DataInput)
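    // Worked example (codes as defined in WALKey.Version): an unversioned
    // key starts with a non-negative vint, e.g. 32 for a 32-byte encoded
    // region name; a versioned key starts with a negative version code,
    // and the length vint follows unless the dictionary handles it.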
    setScopes(null); // writable HLogKey does not contain scopes
    int len = WritableUtils.readVInt(in);
    byte[] tablenameBytes = null;
    if (len < 0) {
      // what we just read was the version
      version = Version.fromCode(len);
      // We only compress V2 of WALKey.
      // If compression is on, the length is handled by the dictionary
      if (compressionContext == null || !version.atLeast(Version.COMPRESSED)) {
        len = WritableUtils.readVInt(in);
      }
    }
    if (compressionContext == null || !version.atLeast(Version.COMPRESSED)) {
      this.encodedRegionName = new byte[len];
      in.readFully(this.encodedRegionName);
      tablenameBytes = Bytes.readByteArray(in);
    } else {
      this.encodedRegionName = Compressor.readCompressed(in, compressionContext.regionDict);
      tablenameBytes = Compressor.readCompressed(in, compressionContext.tableDict);
    }

    this.logSeqNum = in.readLong();
    this.writeTime = in.readLong();

    this.clusterIds.clear();
    if (version.atLeast(Version.INITIAL)) {
      if (in.readBoolean()) {
        // read the older log
        // Definitely is the originating cluster
        clusterIds.add(new UUID(in.readLong(), in.readLong()));
      }
    } else {
      try {
        // dummy read (former byte cluster id)
        in.readByte();
      } catch (EOFException e) {
        // Means it's a very old key, just continue
      }
    }
    try {
      this.tablename = TableName.valueOf(tablenameBytes);
    } catch (IllegalArgumentException iae) {
      if (Bytes.toString(tablenameBytes).equals(TableName.OLD_META_STR)) {
        // It is a pre-namespace meta table edit, continue with new format.
        LOG.info("Got an old .META. edit, continuing with new format");
        this.tablename = TableName.META_TABLE_NAME;
        this.encodedRegionName = HRegionInfo.FIRST_META_REGIONINFO.getEncodedNameAsBytes();
      } else if (Bytes.toString(tablenameBytes).equals(TableName.OLD_ROOT_STR)) {
        // It is a pre-namespace -ROOT- edit; keep the old name but rethrow.
        this.tablename = TableName.OLD_ROOT_TABLE_NAME;
        throw iae;
      } else {
        throw iae;
      }
    }
    // Do not need to read the clusters information as we are using protobufs from 0.95
  }

}