/**
 *
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.regionserver.wal;

import java.io.DataInput;
import java.io.DataOutput;
import java.io.EOFException;
import java.io.IOException;
import java.util.Iterator;
import java.util.List;
import java.util.UUID;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.hbase.classification.InterfaceAudience;
import org.apache.hadoop.hbase.HBaseInterfaceAudience;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.regionserver.MultiVersionConcurrencyControl;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.wal.WALKey;
import org.apache.hadoop.io.Writable;
import org.apache.hadoop.io.WritableUtils;

import com.google.common.annotations.VisibleForTesting;

/**
 * A Key for an entry in the change log.
 *
 * The log intermingles edits to many tables and rows, so each log entry
 * identifies the appropriate table and row.  Within a table and row, they're
 * also sorted.
 *
 * <p>Some Transactional edits (START, COMMIT, ABORT) will not have an
 * associated row.
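 *
 * <p>A minimal construction sketch; the region and table names below are
 * placeholders, and a real encoded region name comes from
 * {@link HRegionInfo#getEncodedNameAsBytes()}:
 * <pre>{@code
 * byte[] encodedRegionName = Bytes.toBytes("0123456789abcdef0123456789abcdef");
 * HLogKey key = new HLogKey(encodedRegionName, TableName.valueOf("testTable"),
 *     System.currentTimeMillis(), new MultiVersionConcurrencyControl());
 * }</pre>
 *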
 * @deprecated use WALKey
 */
@InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.REPLICATION)
@Deprecated
public class HLogKey extends WALKey implements Writable {
  private static final Log LOG = LogFactory.getLog(HLogKey.class);

  public HLogKey() {
    super();
  }

  @VisibleForTesting
  public HLogKey(final byte[] encodedRegionName, final TableName tablename, long logSeqNum,
      final long now, UUID clusterId) {
    super(encodedRegionName, tablename, logSeqNum, now, clusterId);
  }

  public HLogKey(final byte[] encodedRegionName, final TableName tablename) {
    super(encodedRegionName, tablename);
  }

  @VisibleForTesting
  public HLogKey(final byte[] encodedRegionName, final TableName tablename, final long now) {
    super(encodedRegionName, tablename, now);
  }

  public HLogKey(final byte[] encodedRegionName,
                 final TableName tablename,
                 final long now,
                 final MultiVersionConcurrencyControl mvcc) {
    super(encodedRegionName, tablename, now, mvcc);
  }

  /**
   * Create the log key for writing to somewhere.
   * We maintain the tablename mainly for debugging purposes.
   * A regionName is always a sub-table object.
   * <p>Used by log splitting and snapshots.
   *
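   * <p>A usage sketch; {@code encodedRegionName} and {@code mvcc} are assumed
   * to come from the region being written, and the sequence number, timestamp,
   * and cluster id below are placeholder values:
   * <pre>{@code
   * List<UUID> clusterIds = new ArrayList<UUID>();
   * clusterIds.add(UUID.randomUUID()); // id of the originating cluster
   * HLogKey key = new HLogKey(encodedRegionName, TableName.valueOf("testTable"),
   *     1L, System.currentTimeMillis(), clusterIds,
   *     HConstants.NO_NONCE, HConstants.NO_NONCE, mvcc);
   * }</pre>
   *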
   * @param encodedRegionName Encoded name of the region as returned by
   * <code>HRegionInfo#getEncodedNameAsBytes()</code>.
   * @param tablename name of the table
   * @param logSeqNum log sequence number
   * @param now Time at which this edit was written.
   * @param clusterIds the clusters that have consumed the change (used in Replication)
   * @param nonceGroup the nonce group of the operation
   * @param nonce the nonce of the operation
   * @param mvcc the MVCC instance associated with this edit
   */
  public HLogKey(
      final byte[] encodedRegionName,
      final TableName tablename,
      long logSeqNum,
      final long now,
      List<UUID> clusterIds,
      long nonceGroup,
      long nonce,
      MultiVersionConcurrencyControl mvcc) {
    super(encodedRegionName, tablename, logSeqNum, now, clusterIds, nonceGroup, nonce, mvcc);
  }

  /**
   * Create the log key for writing to somewhere.
   * We maintain the tablename mainly for debugging purposes.
   * A regionName is always a sub-table object.
   *
   * @param encodedRegionName Encoded name of the region as returned by
   * <code>HRegionInfo#getEncodedNameAsBytes()</code>.
   * @param tablename name of the table
   * @param now Time at which this edit was written.
   * @param clusterIds the clusters that have consumed the change (used in Replication)
   * @param nonceGroup the nonce group of the operation
   * @param nonce the nonce of the operation
   */
  public HLogKey(final byte[] encodedRegionName,
                 final TableName tablename,
                 final long now,
                 List<UUID> clusterIds,
                 long nonceGroup,
                 long nonce,
                 final MultiVersionConcurrencyControl mvcc) {
    super(encodedRegionName, tablename, now, clusterIds, nonceGroup, nonce, mvcc);
  }

  /**
   * Create the log key for writing to somewhere.
   * We maintain the tablename mainly for debugging purposes.
   * A regionName is always a sub-table object.
   *
   * @param encodedRegionName Encoded name of the region as returned by
   * <code>HRegionInfo#getEncodedNameAsBytes()</code>.
   * @param tablename name of the table
   * @param logSeqNum log sequence number
   * @param nonceGroup the nonce group of the operation
   * @param nonce the nonce of the operation
   * @param mvcc the MVCC instance associated with this edit
   */
  public HLogKey(final byte[] encodedRegionName, final TableName tablename, long logSeqNum,
      long nonceGroup, long nonce, MultiVersionConcurrencyControl mvcc) {
    super(encodedRegionName, tablename, logSeqNum, nonceGroup, nonce, mvcc);
  }

  /**
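   * Writes this key in the legacy {@link Writable} format. A rough sketch of
   * the round trip this supports (the stream setup is illustrative only; the
   * result is lossy since scopes and all but the first cluster id are not
   * written):
   * <pre>{@code
   * ByteArrayOutputStream bos = new ByteArrayOutputStream();
   * key.write(new DataOutputStream(bos));
   * HLogKey copy = new HLogKey();
   * copy.readFields(new DataInputStream(new ByteArrayInputStream(bos.toByteArray())));
   * }</pre>
   *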
   * @deprecated Don't use these Writables methods. Use PB instead.
   */
  @Override
  @Deprecated
  public void write(DataOutput out) throws IOException {
    LOG.warn("HLogKey is being serialized to writable - only expected in test code");
    WritableUtils.writeVInt(out, VERSION.code);
    if (compressionContext == null) {
      Bytes.writeByteArray(out, this.encodedRegionName);
      Bytes.writeByteArray(out, this.tablename.getName());
    } else {
      Compressor.writeCompressed(this.encodedRegionName, 0,
          this.encodedRegionName.length, out,
          compressionContext.regionDict);
      Compressor.writeCompressed(this.tablename.getName(), 0,
          this.tablename.getName().length, out,
          compressionContext.tableDict);
    }
    out.writeLong(this.logSeqNum);
    out.writeLong(this.writeTime);
    // Don't need to write the clusters information as we are using protobufs from 0.95
    // Writing only the first clusterId for testing the legacy read
    Iterator<UUID> iterator = clusterIds.iterator();
    if (iterator.hasNext()) {
      out.writeBoolean(true);
      UUID clusterId = iterator.next();
      out.writeLong(clusterId.getMostSignificantBits());
      out.writeLong(clusterId.getLeastSignificantBits());
    } else {
      out.writeBoolean(false);
    }
  }

  @Override
  public void readFields(DataInput in) throws IOException {
    Version version = Version.UNVERSIONED;
    // HLogKey was not versioned in the beginning.
    // In order to introduce it now, we make use of the fact
    // that encodedRegionName was written with Bytes.writeByteArray,
    // which encodes the array length as a vint which is >= 0.
    // Hence if the vint is >= 0 we have an old version and the vint
    // encodes the length of encodedRegionName.
    // If < 0 we just read the version and the next vint is the length.
    // @see Bytes#readByteArray(DataInput)
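    // For example: an unversioned (pre-versioning) key begins with a
    // non-negative vint holding the encodedRegionName length, whereas a
    // versioned key begins with the negative version code, followed by the
    // (possibly dictionary-compressed) region name.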
    setScopes(null); // writable HLogKey does not contain scopes
    int len = WritableUtils.readVInt(in);
    byte[] tablenameBytes = null;
    if (len < 0) {
      // what we just read was the version
      version = Version.fromCode(len);
      // We only compress V2 of WALKey.
      // If compression is on, the length is handled by the dictionary
      if (compressionContext == null || !version.atLeast(Version.COMPRESSED)) {
        len = WritableUtils.readVInt(in);
      }
    }
    if (compressionContext == null || !version.atLeast(Version.COMPRESSED)) {
      this.encodedRegionName = new byte[len];
      in.readFully(this.encodedRegionName);
      tablenameBytes = Bytes.readByteArray(in);
    } else {
      this.encodedRegionName = Compressor.readCompressed(in, compressionContext.regionDict);
      tablenameBytes = Compressor.readCompressed(in, compressionContext.tableDict);
    }

    this.logSeqNum = in.readLong();
    this.writeTime = in.readLong();

    this.clusterIds.clear();
    if (version.atLeast(Version.INITIAL)) {
      if (in.readBoolean()) {
        // read the older log
        // Definitely is the originating cluster
        clusterIds.add(new UUID(in.readLong(), in.readLong()));
      }
    } else {
      try {
        // dummy read (former byte cluster id)
        in.readByte();
      } catch (EOFException e) {
        // Means it's a very old key, just continue
        if (LOG.isTraceEnabled()) LOG.trace(e);
      }
    }
    try {
      this.tablename = TableName.valueOf(tablenameBytes);
    } catch (IllegalArgumentException iae) {
      if (Bytes.toString(tablenameBytes).equals(TableName.OLD_META_STR)) {
        // It is a pre-namespace meta table edit, continue with new format.
        LOG.info("Got an old .META. edit, continuing with new format ");
        this.tablename = TableName.META_TABLE_NAME;
        this.encodedRegionName = HRegionInfo.FIRST_META_REGIONINFO.getEncodedNameAsBytes();
      } else if (Bytes.toString(tablenameBytes).equals(TableName.OLD_ROOT_STR)) {
        this.tablename = TableName.OLD_ROOT_TABLE_NAME;
        throw iae;
      } else {
        throw iae;
      }
    }
    // Do not need to read the clusters information as we are using protobufs from 0.95
  }

}