/**
 *
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.mapreduce.replication;

import java.io.IOException;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.conf.Configured;
import org.apache.hadoop.hbase.Abortable;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.client.HConnectable;
import org.apache.hadoop.hbase.client.HConnection;
import org.apache.hadoop.hbase.client.HConnectionManager;
import org.apache.hadoop.hbase.client.HTable;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
import org.apache.hadoop.hbase.mapreduce.TableInputFormat;
import org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil;
import org.apache.hadoop.hbase.mapreduce.TableMapper;
import org.apache.hadoop.hbase.replication.ReplicationException;
import org.apache.hadoop.hbase.replication.ReplicationFactory;
import org.apache.hadoop.hbase.replication.ReplicationPeers;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
import org.apache.hadoop.hbase.zookeeper.ZKUtil;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.output.NullOutputFormat;
import org.apache.hadoop.util.Tool;
import org.apache.hadoop.util.ToolRunner;

/**
 * This map-only job compares the data from a local table with a remote one.
 * Every cell is compared and must have exactly the same keys (even timestamp)
 * as well as the same value. It is possible to restrict the job by time range
 * and families. The peer id that's provided must match the one given when the
 * replication stream was set up.
 * <p>
 * Two counters are provided, Verifier.Counters.GOODROWS and BADROWS. The
 * reason why a row is different is shown in the map's log.
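 * <p>
 * For illustration, an invocation might look like the sketch below (the
 * timestamps and peer id are placeholders taken from the usage text; adjust
 * them to your own cluster):
 * <pre>
 * $ bin/hbase org.apache.hadoop.hbase.mapreduce.replication.VerifyReplication \
 *     --starttime=1265875194289 --endtime=1265878794289 5 TestTable
 * </pre>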
 */
public class VerifyReplication extends Configured implements Tool {

  private static final Log LOG =
      LogFactory.getLog(VerifyReplication.class);

  public static final String NAME = "verifyrep";
  static long startTime = 0;
  static long endTime = Long.MAX_VALUE;
  static String tableName = null;
  static String families = null;
  static String peerId = null;

  /**
   * Map-only comparator for 2 tables
   */
  public static class Verifier
      extends TableMapper<ImmutableBytesWritable, Put> {

    public enum Counters {GOODROWS, BADROWS}

    private ResultScanner replicatedScanner;

    /**
     * Map method that compares every scanned row with the equivalent from
     * a distant cluster.
     * @param row  The current table row key.
     * @param value  The columns.
     * @param context  The current context.
     * @throws IOException When something is broken with the data.
     */
    @Override
    public void map(ImmutableBytesWritable row, final Result value,
                    Context context)
        throws IOException {
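      // Lazily open a scanner against the peer table on the first call,
      // starting the remote scan at this mapper's first row key.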
      if (replicatedScanner == null) {
        Configuration conf = context.getConfiguration();
        final Scan scan = new Scan();
        scan.setCaching(conf.getInt(TableInputFormat.SCAN_CACHEDROWS, 1));
        long startTime = conf.getLong(NAME + ".startTime", 0);
        long endTime = conf.getLong(NAME + ".endTime", Long.MAX_VALUE);
        String families = conf.get(NAME + ".families", null);
        if (families != null) {
          String[] fams = families.split(",");
          for (String fam : fams) {
            scan.addFamily(Bytes.toBytes(fam));
          }
        }
        scan.setTimeRange(startTime, endTime);
        HConnectionManager.execute(new HConnectable<Void>(conf) {
          @Override
          public Void connect(HConnection conn) throws IOException {
            String zkClusterKey = conf.get(NAME + ".peerQuorumAddress");
            Configuration peerConf = HBaseConfiguration.create(conf);
            ZKUtil.applyClusterKeyToConf(peerConf, zkClusterKey);

            HTable replicatedTable = new HTable(peerConf, conf.get(NAME + ".tableName"));
            scan.setStartRow(value.getRow());
            replicatedScanner = replicatedTable.getScanner(scan);
            return null;
          }
        });
      }
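      // Compare the local row against the next row from the peer scanner;
      // compareResults throws on any mismatch, which is counted as a bad row.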
      Result res = replicatedScanner.next();
      try {
        Result.compareResults(value, res);
        context.getCounter(Counters.GOODROWS).increment(1);
      } catch (Exception e) {
        LOG.warn("Bad row", e);
        context.getCounter(Counters.BADROWS).increment(1);
      }
    }

    @Override
    protected void cleanup(Context context) {
      if (replicatedScanner != null) {
        replicatedScanner.close();
        replicatedScanner = null;
      }
    }
  }

  private static String getPeerQuorumAddress(final Configuration conf) throws IOException {
    ZooKeeperWatcher localZKW = null;
    try {
      localZKW = new ZooKeeperWatcher(conf, "VerifyReplication",
          new Abortable() {
            @Override public void abort(String why, Throwable e) {}
            @Override public boolean isAborted() {return false;}
          });

      ReplicationPeers rp = ReplicationFactory.getReplicationPeers(localZKW, conf, localZKW);
      rp.init();

      Configuration peerConf = rp.getPeerConf(peerId);
      if (peerConf == null) {
        throw new IOException("Couldn't get peer conf!");
      }

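      // Derive the peer's ZK cluster key (quorum:port:znode parent) from its conf.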
      return ZKUtil.getZooKeeperClusterKey(peerConf);
    } catch (ReplicationException e) {
      throw new IOException(
          "An error occurred while trying to connect to the remote peer cluster", e);
    } finally {
      if (localZKW != null) {
        localZKW.close();
      }
    }
  }

  /**
   * Sets up the actual job.
   *
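   * A rough sketch of programmatic use, mirroring what {@link #run} does
   * (the argument values below are placeholders):
   * <pre>
   * Job job = VerifyReplication.createSubmittableJob(conf,
   *     new String[] { "--starttime=0", "5", "TestTable" });
   * if (job != null) {
   *   job.waitForCompletion(true);
   * }
   * </pre>
   *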
   * @param conf  The current configuration.
   * @param args  The command line parameters.
   * @return The newly created job.
   * @throws java.io.IOException When setting up the job fails.
   */
  public static Job createSubmittableJob(Configuration conf, String[] args)
  throws IOException {
    if (!doCommandLine(args)) {
      return null;
    }
    if (!conf.getBoolean(HConstants.REPLICATION_ENABLE_KEY,
        HConstants.REPLICATION_ENABLE_DEFAULT)) {
      throw new IOException("Replication needs to be enabled to verify it.");
    }
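    // Pass the verification parameters to the mappers through the job conf.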
    conf.set(NAME + ".peerId", peerId);
    conf.set(NAME + ".tableName", tableName);
    conf.setLong(NAME + ".startTime", startTime);
    conf.setLong(NAME + ".endTime", endTime);
    if (families != null) {
      conf.set(NAME + ".families", families);
    }

    String peerQuorumAddress = getPeerQuorumAddress(conf);
    conf.set(NAME + ".peerQuorumAddress", peerQuorumAddress);
    LOG.info("Peer Quorum Address: " + peerQuorumAddress);

    Job job = new Job(conf, NAME + "_" + tableName);
    job.setJarByClass(VerifyReplication.class);

    Scan scan = new Scan();
    scan.setTimeRange(startTime, endTime);
    if (families != null) {
      String[] fams = families.split(",");
      for (String fam : fams) {
        scan.addFamily(Bytes.toBytes(fam));
      }
    }
    TableMapReduceUtil.initTableMapperJob(tableName, scan,
        Verifier.class, null, null, job);

    // Obtain the auth token from peer cluster
    TableMapReduceUtil.initCredentialsForCluster(job, peerQuorumAddress);

    job.setOutputFormatClass(NullOutputFormat.class);
    job.setNumReduceTasks(0);
    return job;
  }

  private static boolean doCommandLine(final String[] args) {
    if (args.length < 2) {
      printUsage(null);
      return false;
    }
    try {
      for (int i = 0; i < args.length; i++) {
        String cmd = args[i];
        if (cmd.equals("-h") || cmd.startsWith("--h")) {
          printUsage(null);
          return false;
        }

        final String startTimeArgKey = "--starttime=";
        if (cmd.startsWith(startTimeArgKey)) {
          startTime = Long.parseLong(cmd.substring(startTimeArgKey.length()));
          continue;
        }

        final String endTimeArgKey = "--endtime=";
        if (cmd.startsWith(endTimeArgKey)) {
          endTime = Long.parseLong(cmd.substring(endTimeArgKey.length()));
          continue;
        }

        final String familiesArgKey = "--families=";
        if (cmd.startsWith(familiesArgKey)) {
          families = cmd.substring(familiesArgKey.length());
          continue;
        }

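        // The last two arguments are positional: the peer id, then the table name.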
        if (i == args.length - 2) {
          peerId = cmd;
        }

        if (i == args.length - 1) {
          tableName = cmd;
        }
      }
    } catch (Exception e) {
      e.printStackTrace();
      printUsage("Can't start because " + e.getMessage());
      return false;
    }
    return true;
  }

  /*
   * @param errorMsg Error message.  Can be null.
   */
  private static void printUsage(final String errorMsg) {
    if (errorMsg != null && errorMsg.length() > 0) {
      System.err.println("ERROR: " + errorMsg);
    }
    System.err.println("Usage: verifyrep [--starttime=X]" +
        " [--endtime=Y] [--families=A] <peerid> <tablename>");
    System.err.println();
    System.err.println("Options:");
    System.err.println(" starttime    beginning of the time range");
    System.err.println("              without endtime means from starttime to forever");
    System.err.println(" endtime      end of the time range");
    System.err.println(" families     comma-separated list of families to copy");
    System.err.println();
    System.err.println("Args:");
    System.err.println(" peerid       Id of the peer used for verification, must match the one given for replication");
    System.err.println(" tablename    Name of the table to verify");
    System.err.println();
    System.err.println("Examples:");
    System.err.println(" To verify the data replicated from TestTable for a 1-hour window with peer #5");
    System.err.println(" $ bin/hbase " +
        "org.apache.hadoop.hbase.mapreduce.replication.VerifyReplication" +
        " --starttime=1265875194289 --endtime=1265878794289 5 TestTable ");
  }

  @Override
  public int run(String[] args) throws Exception {
    Configuration conf = this.getConf();
    Job job = createSubmittableJob(conf, args);
    if (job != null) {
      return job.waitForCompletion(true) ? 0 : 1;
    }
    return 1;
  }

  /**
   * Main entry point.
   *
   * @param args  The command line parameters.
   * @throws Exception When running the job fails.
   */
  public static void main(String[] args) throws Exception {
    int res = ToolRunner.run(HBaseConfiguration.create(), new VerifyReplication(), args);
    System.exit(res);
  }
}