/**
 * Copyright 2009 The Apache Software Foundation
 *
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.regionserver.wal;

import java.io.DataInput;
import java.io.DataInputStream;
import java.io.DataOutput;
import java.io.DataOutputStream;
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import java.util.NavigableMap;
import java.util.TreeMap;
import java.util.UUID;

import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.codec.Decoder;
import org.apache.hadoop.hbase.codec.Encoder;
import org.apache.hadoop.hbase.io.HeapSize;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.ClassSize;
import org.apache.hadoop.io.Writable;

/**
 * WALEdit: Used in HBase's transaction log (WAL) to represent the
 * collection of edits (KeyValue objects) corresponding to a single
 * transaction. The class implements the "Writable" interface for
 * serializing/deserializing a set of KeyValue items.
 *
 * Previously, if a transaction contained 3 edits to c1, c2, c3 for a row R,
 * the HLog would have three log entries as follows:
 *
 *    <logseq1-for-edit1>:<KeyValue-for-edit-c1>
 *    <logseq2-for-edit2>:<KeyValue-for-edit-c2>
 *    <logseq3-for-edit3>:<KeyValue-for-edit-c3>
 *
 * This presented problems because row-level atomicity of transactions
 * was not guaranteed. If we crashed after only a few of the above appends
 * had made it in, recovery would restore a partial transaction.
 *
 * In the new world, all the edits for a given transaction are written
 * out as a single record, for example:
 *
 *   <logseq#-for-entire-txn>:<WALEdit-for-entire-txn>
 *
 * where the WALEdit is serialized as:
 *   <-1, # of edits, <KeyValue>, <KeyValue>, ... >
 * For example:
 *   <-1, 3, <KeyValue-for-edit-c1>, <KeyValue-for-edit-c2>, <KeyValue-for-edit-c3>>
 *
 * The -1 marker is just a special way of being backward compatible with
 * an old HLog, which would have contained a single <KeyValue>.
 *
 * The deserializer for WALEdit backward-compatibly detects whether the
 * record is an old-style KeyValue or a new-style WALEdit.
 */
public class WALEdit implements Writable, HeapSize {
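
  /*
   * Illustrative sketch (not part of the original class): how a caller might
   * accumulate the three edits from the class comment into a single WALEdit
   * so they travel as one atomic log record. The row/family/value variables
   * are hypothetical placeholders.
   *
   *   WALEdit edit = new WALEdit();
   *   edit.add(new KeyValue(row, family, Bytes.toBytes("c1"), value1));
   *   edit.add(new KeyValue(row, family, Bytes.toBytes("c2"), value2));
   *   edit.add(new KeyValue(row, family, Bytes.toBytes("c3"), value3));
   */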

  /*
   * The id of a cluster that has consumed the change represented by this class
   * is stored in the scopes map with its key prefixed by the value of this
   * variable. This ensures that cluster ids do not interfere with the column
   * family replication settings stored in the same map: the prefix is a
   * period, and a column family name can never start with a period.
   */
  private static final String PREFIX_CLUSTER_KEY = ".";
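  // For example, after addClusterId(someUUID) the scopes map will contain an
  // entry keyed ".<someUUID>" (note the leading period), alongside any plain
  // column family keys such as "cf".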
  private static final int VERSION_2 = -1;

  private final ArrayList<KeyValue> kvs = new ArrayList<KeyValue>(1);

  /**
   * This variable contains the column family replication settings, and also
   * the ids of the clusters that have already consumed the change represented
   * by this object. This overloading of scopes with the consumed cluster ids
   * was introduced while porting the fix for HBASE-7709 back to the 0.94
   * release; the overloading has been removed in newer releases (0.95.2+). To
   * check/change the column family settings, please use the getFromScope and
   * putIntoScope methods, and for marking/checking whether a cluster has
   * consumed the change, please use the addClusterId, addClusterIds,
   * hasClusterId and getClusterIds methods.
   */
  private final NavigableMap<byte[], Integer> scopes = new TreeMap<byte[], Integer>(
      Bytes.BYTES_COMPARATOR);

  // Default to decoding uncompressed data - needed for replication, which
  // enforces that uncompressed edits are sent across the wire. In the regular
  // case (reading/writing the WAL), the codec will be set up by the
  // reader/writer class, not here.
  private WALEditCodec codec = new WALEditCodec();

  public WALEdit() {
  }

  /**
   * {@link #setCodec(WALEditCodec)} must be called before calling this method.
   * @param compression the {@link CompressionContext} for the underlying codec.
   */
  @SuppressWarnings("javadoc")
  public void setCompressionContext(final CompressionContext compression) {
    this.codec.setCompression(compression);
  }

  public void setCodec(WALEditCodec codec) {
    this.codec = codec;
  }
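
  /*
   * Illustrative sketch (not part of the original class): the call order the
   * javadoc above requires when compression is in play. The codec and
   * compression context instances are hypothetical stand-ins for whatever the
   * WAL reader/writer supplies.
   *
   *   WALEdit edit = new WALEdit();
   *   edit.setCodec(someCodec);                    // must come first
   *   edit.setCompressionContext(compressionCtx);  // then attach compression
   */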

  public void add(KeyValue kv) {
    this.kvs.add(kv);
  }

  public boolean isEmpty() {
    return kvs.isEmpty();
  }

  public int size() {
    return kvs.size();
  }

  public List<KeyValue> getKeyValues() {
    return kvs;
  }

  public Integer getFromScope(byte[] key) {
    return scopes.get(key);
  }

  /**
   * @return the underlying replication scope map
   * @deprecated use {@link #getFromScope(byte[])} instead
   */
  @Deprecated
  public NavigableMap<byte[], Integer> getScopes() {
    return scopes;
  }

  /**
   * @param scopes the replication scope information to set; must be
   *          non-<tt>null</tt>
   * @deprecated use {@link #putIntoScope(byte[], Integer)} instead. This
   *             completely replaces any existing scopes.
   */
  @Deprecated
  public void setScopes(NavigableMap<byte[], Integer> scopes) {
    this.scopes.clear();
    this.scopes.putAll(scopes);
  }

  public void putIntoScope(byte[] key, Integer value) {
    scopes.put(key, value);
  }

  public boolean hasKeyInScope(byte[] key) {
    return scopes.containsKey(key);
  }

  /**
   * @return true if the cluster with the given clusterId has consumed the change.
   */
  public boolean hasClusterId(UUID clusterId) {
    return hasKeyInScope(Bytes.toBytes(PREFIX_CLUSTER_KEY + clusterId.toString()));
  }

  /**
   * Marks that the cluster with the given clusterId has consumed the change.
   */
  public void addClusterId(UUID clusterId) {
    scopes.put(Bytes.toBytes(PREFIX_CLUSTER_KEY + clusterId.toString()), 1);
  }

  /**
   * Marks that the clusters with the given clusterIds have consumed the change.
   */
  public void addClusterIds(List<UUID> clusterIds) {
    for (UUID clusterId : clusterIds) {
      addClusterId(clusterId);
    }
  }

  /**
   * @return the set of cluster ids that have consumed the change.
   */
  public List<UUID> getClusterIds() {
    List<UUID> clusterIds = new ArrayList<UUID>();
    for (byte[] keyBytes : scopes.keySet()) {
      String key = Bytes.toString(keyBytes);
      if (key.startsWith(PREFIX_CLUSTER_KEY)) {
        clusterIds.add(UUID.fromString(key.substring(PREFIX_CLUSTER_KEY.length())));
      }
    }
    return clusterIds;
  }
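
  /*
   * Illustrative sketch (not part of the original class): how the cluster-id
   * bookkeeping above round-trips through the scopes map.
   *
   *   UUID source = UUID.randomUUID();
   *   edit.addClusterId(source);                     // stored as ".<uuid>" -> 1
   *   assert edit.hasClusterId(source);
   *   assert edit.getClusterIds().contains(source);
   */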

  public void readFields(DataInput in) throws IOException {
    kvs.clear();
    scopes.clear();
    Decoder decoder = this.codec.getDecoder((DataInputStream) in);
    int versionOrLength = in.readInt();
    int length = versionOrLength;

    // make sure we get the real length
    if (versionOrLength == VERSION_2) {
      length = in.readInt();
    }

    // read in all the key values
    kvs.ensureCapacity(length);
    for (int i = 0; i < length && decoder.advance(); i++) {
      kvs.add(decoder.current());
    }

    // it's a new-style WALEdit, so we need the replication scopes too
    if (versionOrLength == VERSION_2) {
      int numEntries = in.readInt();
      if (numEntries > 0) {
        for (int i = 0; i < numEntries; i++) {
          byte[] key = Bytes.readByteArray(in);
          int scope = in.readInt();
          scopes.put(key, scope);
        }
      }
    }
  }
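
  /*
   * Illustrative sketch (not part of the original class): the two record
   * layouts readFields() distinguishes. The field widths are conceptual,
   * not byte-accurate.
   *
   *   new style:  <int -1> <int n> <n KeyValues>
   *               <int numScopes> numScopes x (<byte[] key> <int scope>)
   *   old style:  the first int is non-negative and is taken as the length,
   *               and there is no trailing scope section.
   */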

  public void write(DataOutput out) throws IOException {
    Encoder kvEncoder = codec.getEncoder((DataOutputStream) out);
    out.writeInt(VERSION_2);

    // write out the KeyValues
    out.writeInt(kvs.size());
    for (KeyValue kv : kvs) {
      kvEncoder.write(kv);
    }
    kvEncoder.flush();

    out.writeInt(scopes.size());
    for (byte[] key : scopes.keySet()) {
      Bytes.writeByteArray(out, key);
      out.writeInt(scopes.get(key));
    }
  }
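
  /*
   * Illustrative sketch (not part of the original class): a Writable
   * round-trip through in-memory streams. The stream plumbing is ordinary
   * java.io test scaffolding, not HBase API; note that readFields() and
   * write() cast their arguments, so real DataInputStream/DataOutputStream
   * instances must be supplied.
   *
   *   ByteArrayOutputStream bytes = new ByteArrayOutputStream();
   *   edit.write(new DataOutputStream(bytes));
   *   WALEdit copy = new WALEdit();
   *   copy.readFields(new DataInputStream(
   *       new ByteArrayInputStream(bytes.toByteArray())));
   */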

  public long heapSize() {
    long ret = ClassSize.ARRAYLIST;
    for (KeyValue kv : kvs) {
      ret += kv.heapSize();
    }
    ret += ClassSize.TREEMAP;
    ret += ClassSize.align(scopes.size() * ClassSize.MAP_ENTRY);
    // TODO this isn't quite right, need help here
    return ret;
  }

  public String toString() {
    StringBuilder sb = new StringBuilder();

    sb.append("[#edits: " + kvs.size() + " = <");
    for (KeyValue kv : kvs) {
      sb.append(kv.toString());
      sb.append("; ");
    }
    sb.append(" scopes: " + scopes.toString());
    sb.append(">]");
    return sb.toString();
  }
}