/**
 *
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.regionserver.wal;

import java.io.DataInput;
import java.io.DataOutput;
import java.io.EOFException;
import java.io.IOException;
import java.util.Iterator;
import java.util.List;
import java.util.UUID;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.hbase.classification.InterfaceAudience;
import org.apache.hadoop.hbase.HBaseInterfaceAudience;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.wal.WALKey;
import org.apache.hadoop.io.Writable;
import org.apache.hadoop.io.WritableUtils;

import com.google.common.annotations.VisibleForTesting;

/**
 * A Key for an entry in the change log.
 *
 * The log intermingles edits to many tables and rows, so each log entry
 * identifies the appropriate table and row. Within a table and row, entries
 * are also sorted.
 *
 * <p>Some transactional edits (START, COMMIT, ABORT) will not have an
 * associated row.
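 *
 * <p>For example, a key for a single edit might be constructed as follows
 * (a sketch only; {@code regionInfo}, the table name, and the timestamp are
 * illustrative):
 * <pre>{@code
 * HLogKey key = new HLogKey(regionInfo.getEncodedNameAsBytes(),
 *     TableName.valueOf("t1"), System.currentTimeMillis());
 * }</pre>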
 * @deprecated use {@link WALKey}
 */
@InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.REPLICATION)
@Deprecated
public class HLogKey extends WALKey implements Writable {
  public static final Log LOG = LogFactory.getLog(HLogKey.class);

  public HLogKey() {
    super();
  }

  @VisibleForTesting
  public HLogKey(final byte[] encodedRegionName, final TableName tablename, long logSeqNum,
      final long now, UUID clusterId) {
    super(encodedRegionName, tablename, logSeqNum, now, clusterId);
  }

  public HLogKey(final byte[] encodedRegionName, final TableName tablename) {
    super(encodedRegionName, tablename);
  }

  public HLogKey(final byte[] encodedRegionName, final TableName tablename, final long now) {
    super(encodedRegionName, tablename, now);
  }

  /**
   * Create the log key for writing to somewhere.
   * We maintain the tablename mainly for debugging purposes.
   * A regionName is always a sub-table object.
   * <p>Used by log splitting and snapshots.
   *
   * @param encodedRegionName Encoded name of the region as returned by
   * <code>HRegionInfo#getEncodedNameAsBytes()</code>.
   * @param tablename name of the table
   * @param logSeqNum log sequence number
   * @param now Time at which this edit was written.
   * @param clusterIds the clusters that have consumed the change (used in replication)
   * @param nonceGroup nonce group of the edit
   * @param nonce nonce of the edit
   */
  public HLogKey(final byte [] encodedRegionName, final TableName tablename,
      long logSeqNum, final long now, List<UUID> clusterIds, long nonceGroup, long nonce) {
    super(encodedRegionName, tablename, logSeqNum, now, clusterIds, nonceGroup, nonce);
  }

  /**
   * Create the log key for writing to somewhere.
   * We maintain the tablename mainly for debugging purposes.
   * A regionName is always a sub-table object.
   *
   * @param encodedRegionName Encoded name of the region as returned by
   * <code>HRegionInfo#getEncodedNameAsBytes()</code>.
   * @param tablename name of the table
   * @param now Time at which this edit was written.
   * @param clusterIds the clusters that have consumed the change (used in replication)
   * @param nonceGroup nonce group of the edit
   * @param nonce nonce of the edit
   */
  public HLogKey(final byte [] encodedRegionName, final TableName tablename,
      final long now, List<UUID> clusterIds, long nonceGroup, long nonce) {
    super(encodedRegionName, tablename, now, clusterIds, nonceGroup, nonce);
  }

  /**
   * Create the log key for writing to somewhere.
   * We maintain the tablename mainly for debugging purposes.
   * A regionName is always a sub-table object.
   *
   * @param encodedRegionName Encoded name of the region as returned by
   * <code>HRegionInfo#getEncodedNameAsBytes()</code>.
   * @param tablename name of the table
   * @param logSeqNum log sequence number
   * @param nonceGroup nonce group of the edit
   * @param nonce nonce of the edit
   */
  public HLogKey(final byte [] encodedRegionName, final TableName tablename, long logSeqNum,
      long nonceGroup, long nonce) {
    super(encodedRegionName, tablename, logSeqNum, nonceGroup, nonce);
  }

  /**
   * @deprecated Don't use these Writable methods. Use PB instead.
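   *
   * <p>A legacy round-trip through this interface looks like the following
   * sketch (test code only; the stream setup is illustrative and {@code key}
   * is an existing HLogKey):
   * <pre>{@code
   * ByteArrayOutputStream bos = new ByteArrayOutputStream();
   * key.write(new DataOutputStream(bos));
   * HLogKey copy = new HLogKey();
   * copy.readFields(new DataInputStream(new ByteArrayInputStream(bos.toByteArray())));
   * }</pre>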
   */
  @Override
  @Deprecated
  public void write(DataOutput out) throws IOException {
    LOG.warn("HLogKey is being serialized to writable - only expected in test code");
    WritableUtils.writeVInt(out, VERSION.code);
    if (compressionContext == null) {
      Bytes.writeByteArray(out, this.encodedRegionName);
      Bytes.writeByteArray(out, this.tablename.getName());
    } else {
      Compressor.writeCompressed(this.encodedRegionName, 0,
          this.encodedRegionName.length, out,
          compressionContext.regionDict);
      Compressor.writeCompressed(this.tablename.getName(), 0, this.tablename.getName().length, out,
          compressionContext.tableDict);
    }
    out.writeLong(this.logSeqNum);
    out.writeLong(this.writeTime);
    // Don't need to write the clusters information as we are using protobufs from 0.95
    // Writing only the first clusterId for testing the legacy read
    Iterator<UUID> iterator = clusterIds.iterator();
    if (iterator.hasNext()) {
      out.writeBoolean(true);
      UUID clusterId = iterator.next();
      out.writeLong(clusterId.getMostSignificantBits());
      out.writeLong(clusterId.getLeastSignificantBits());
    } else {
      out.writeBoolean(false);
    }
  }

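  /*
   * Legacy Writable layout, as produced by write() above and consumed by
   * readFields() below: a vint version code, the encoded region name, the
   * table name, the log sequence number (long), the write time (long), and a
   * boolean followed by at most one cluster UUID (two longs).
   */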
  @Override
  public void readFields(DataInput in) throws IOException {
    Version version = Version.UNVERSIONED;
    // HLogKey was not versioned in the beginning.
    // In order to introduce versioning now, we make use of the fact
    // that encodedRegionName was written with Bytes.writeByteArray,
    // which encodes the array length as a vint which is >= 0.
    // Hence if the vint is >= 0 we have an old version and the vint
    // encodes the length of encodedRegionName.
    // If < 0 we just read the version and the next vint is the length.
    // @see Bytes#readByteArray(DataInput)
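    // For example, a legacy unversioned key leads with the region name length
    // (>= 0), while a versioned key leads with a negative version code
    // (in WALKey.Version, e.g. INITIAL = -1, COMPRESSED = -2).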
    setScopes(null); // writable HLogKey does not contain scopes
    int len = WritableUtils.readVInt(in);
    byte[] tablenameBytes = null;
    if (len < 0) {
      // what we just read was the version
      version = Version.fromCode(len);
      // We only compress V2 of WALKey.
      // If compression is on, the length is handled by the dictionary
      if (compressionContext == null || !version.atLeast(Version.COMPRESSED)) {
        len = WritableUtils.readVInt(in);
      }
    }
    if (compressionContext == null || !version.atLeast(Version.COMPRESSED)) {
      this.encodedRegionName = new byte[len];
      in.readFully(this.encodedRegionName);
      tablenameBytes = Bytes.readByteArray(in);
    } else {
      this.encodedRegionName = Compressor.readCompressed(in, compressionContext.regionDict);
      tablenameBytes = Compressor.readCompressed(in, compressionContext.tableDict);
    }

    this.logSeqNum = in.readLong();
    this.writeTime = in.readLong();

    this.clusterIds.clear();
    if (version.atLeast(Version.INITIAL)) {
      if (in.readBoolean()) {
        // read the older log
        // Definitely is the originating cluster
        clusterIds.add(new UUID(in.readLong(), in.readLong()));
      }
    } else {
      try {
        // dummy read (former byte cluster id)
        in.readByte();
      } catch (EOFException e) {
        // Means it's a very old key, just continue
        if (LOG.isTraceEnabled()) LOG.trace(e);
      }
    }
    try {
      this.tablename = TableName.valueOf(tablenameBytes);
    } catch (IllegalArgumentException iae) {
      if (Bytes.toString(tablenameBytes).equals(TableName.OLD_META_STR)) {
        // It is a pre-namespace meta table edit, continue with new format.
        LOG.info("Got an old .META. edit, continuing with new format ");
        this.tablename = TableName.META_TABLE_NAME;
        this.encodedRegionName = HRegionInfo.FIRST_META_REGIONINFO.getEncodedNameAsBytes();
      } else if (Bytes.toString(tablenameBytes).equals(TableName.OLD_ROOT_STR)) {
        this.tablename = TableName.OLD_ROOT_TABLE_NAME;
        throw iae;
      } else {
        throw iae;
      }
    }
    // Do not need to read the clusters information as we are using protobufs from 0.95
  }

}