/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.mob.mapreduce;

import java.io.IOException;
import java.util.Arrays;
import java.util.Base64;
import java.util.HashSet;
import java.util.Set;
import java.util.UUID;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.conf.Configured;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.io.HFileLink;
import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
import org.apache.hadoop.hbase.mapreduce.TableInputFormat;
import org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil;
import org.apache.hadoop.hbase.mapreduce.TableMapper;
import org.apache.hadoop.hbase.mob.MobConstants;
import org.apache.hadoop.hbase.mob.MobUtils;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.CommonFSUtils;
import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
import org.apache.hadoop.hbase.util.HFileArchiveUtil;
import org.apache.hadoop.hbase.util.Pair;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Reducer;
import org.apache.hadoop.mapreduce.lib.output.TextOutputFormat;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.util.Tool;
import org.apache.hadoop.util.ToolRunner;
import org.apache.yetus.audience.InterfaceAudience;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

/**
 * Scans a given table + CF for all mob reference cells to get the list of backing mob files. For
 * each referenced file we attempt to verify that said file is on the FileSystem in a place that the
 * MOB system will look when attempting to resolve the actual value.
 * <p/>
 * The job includes counters that can help provide a rough sketch of the mob data.
 *
 * <pre>
 * Map-Reduce Framework
 *         Map input records=10000
 * ...
 *         Reduce output records=99
 * ...
 * CELLS PER ROW
 *         Number of rows with 1s of cells per row=10000
 * MOB
 *         NUM_CELLS=52364
 * PROBLEM
 *         Affected rows=338
 *         Problem MOB files=2
 * ROWS WITH PROBLEMS PER FILE
 *         Number of HFiles with 100s of affected rows=2
 * SIZES OF CELLS
 *         Number of cells with size in the 10,000s of bytes=627
 *         Number of cells with size in the 100,000s of bytes=51392
 *         Number of cells with size in the 1,000,000s of bytes=345
 * SIZES OF ROWS
 *         Number of rows with total size in the 100,000s of bytes=6838
 *         Number of rows with total size in the 1,000,000s of bytes=3162
 * </pre>
 * <ol>
 * <li>Map-Reduce Framework:Map input records - the number of rows with mob references</li>
 * <li>Map-Reduce Framework:Reduce output records - the number of unique hfiles referenced</li>
 * <li>MOB:NUM_CELLS - the total number of mob reference cells</li>
 * <li>PROBLEM:Affected rows - the number of rows that reference hfiles with an issue</li>
 * <li>PROBLEM:Problem MOB files - the number of unique hfiles that have an issue</li>
 * <li>CELLS PER ROW: - this counter group gives a histogram of the order of magnitude of the number
 * of cells in a given row by grouping by the number of digits used in each count. This allows us to
 * see more about the distribution of cells than what we can determine with just the cell count and
 * the row count. In this particular example we can see that all of our rows have somewhere between
 * 1 - 9 cells.</li>
 * <li>ROWS WITH PROBLEMS PER FILE: - this counter group gives a histogram of the order of magnitude
 * of the number of rows in each of the hfiles with a problem. e.g. in the example there are 2
 * hfiles and they each have the same order of magnitude number of rows, specifically between 100
 * and 999.</li>
 * <li>SIZES OF CELLS: - this counter group gives a histogram of the order of magnitude of the size
 * of mob values according to our reference cells. e.g. in the example above we have cell sizes that
 * are all between 10,000 bytes and 9,999,999 bytes. From this histogram we can also see that _most_
 * cells are 100,000 - 999,999 bytes and the smaller and bigger ones are outliers making up less
 * than 2% of mob cells.</li>
 * <li>SIZES OF ROWS: - this counter group gives a histogram of the order of magnitude of the size
 * of mob values across each row according to our reference cells. In the example above we have rows
 * that are between 100,000 bytes and 9,999,999 bytes. We can also see that about 2/3rd of our
 * rows are 100,000 - 999,999 bytes.</li>
 * </ol>
 * Generates a report that gives one file status per line, with tabs dividing fields.
 *
 * <pre>
 * RESULT OF LOOKUP FILE REF  comma separated, base64 encoded rows when there's a problem
 * </pre>
 *
 * e.g.
 *
 * <pre>
 * MOB DIR  09c576e28a65ed2ead0004d192ffaa382019110184b30a1c7e034573bf8580aef8393402
 * MISSING FILE    28e252d7f013973174750d483d358fa020191101f73536e7133f4cd3ab1065edf588d509        MmJiMjMyYzBiMTNjNzc0OTY1ZWY4NTU4ZjBmYmQ2MTUtNTIz,MmEzOGE0YTkzMTZjNDllNWE4MzM1MTdjNDVkMzEwNzAtODg=
 * </pre>
 *
 * Possible results are listed; the first three indicate things are working properly.
 * <ol>
 * <li>MOB DIR - the reference is in the normal MOB area for the given table and CF</li>
 * <li>HLINK TO ARCHIVE FOR SAME TABLE - the reference is present in the archive area for this table
 * and CF</li>
 * <li>HLINK TO ARCHIVE FOR OTHER TABLE - the reference is present in a different table and CF,
 * either in the MOB or archive areas (e.g. from a snapshot restore or clone)</li>
 * <li>ARCHIVE WITH HLINK BUT NOT FROM OUR TABLE - the reference is currently present in the archive
 * area for this table and CF, but it is kept there because a _different_ table has a reference to
 * it (e.g. from a snapshot clone). If these other tables are removed then the file will likely be
 * deleted unless there is a snapshot also referencing it.</li>
 * <li>ARCHIVE BUT NO HLINKS - the reference is currently present in the archive for this table and
 * CF, but there are no references present to prevent its removal. Unless it is newer than the
 * general TTL (default 5 minutes) or referenced in a snapshot it will be subject to cleaning.</li>
 * <li>ARCHIVE BUT FAILURE WHILE CHECKING HLINKS - Check the job logs to see why things failed while
 * looking for why this file is being kept around.</li>
 * <li>MISSING FILE - We couldn't find the reference on the FileSystem. Either there is dataloss due
 * to a bug in the MOB storage system or the MOB storage is damaged but in an edge case that allows
 * it to work for now. You can verify which by doing a raw reference scan to get the referenced
 * hfile and check the underlying filesystem. See the ref guide section on mob for details.</li>
 * <li>HLINK BUT POINTS TO MISSING FILE - There is a pointer in our mob area for this table and CF
 * to a file elsewhere on the FileSystem, however the file it points to no longer exists.</li>
 * <li>MISSING FILE BUT FAILURE WHILE CHECKING HLINKS - We could not find the referenced file,
 * however you should check the job logs to see why we couldn't check to see if there is a pointer
 * to the referenced file in our archive or another table's archive or mob area.</li>
 * </ol>
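 * <p/>
 * The tool takes the output directory, table name, and mob column family as arguments; the
 * invocation below is illustrative (the paths and names are examples, not defaults):
 *
 * <pre>
 * hbase org.apache.hadoop.hbase.mob.mapreduce.MobRefReporter /user/hbase/mob-report someTable someMobFamily
 * </pre>
 *
 * Row keys in the report are base64 encodings of the raw row key bytes, so an affected row can be
 * recovered with {@link java.util.Base64}, e.g.
 * {@code Base64.getDecoder().decode("MmJiMjMyYzBiMTNjNzc0OTY1ZWY4NTU4ZjBmYmQ2MTUtNTIz")}; note
 * that the decoded bytes are only printable if the row keys themselves are printable.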
 */
@InterfaceAudience.Private
public class MobRefReporter extends Configured implements Tool {
  private static final Logger LOG = LoggerFactory.getLogger(MobRefReporter.class);
  public static final String NAME = "mobrefs";
  static final String REPORT_JOB_ID = "mob.report.job.id";
  static final String REPORT_START_DATETIME = "mob.report.job.start";

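  /**
   * Maps each row to (mob file name, row key) pairs, one per unique mob file the row references,
   * while updating the per-cell and per-row histogram counters described in the class javadoc.
   */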
  public static class MobRefMapper extends TableMapper<Text, ImmutableBytesWritable> {
    @Override
    public void map(ImmutableBytesWritable r, Result columns, Context context)
      throws IOException, InterruptedException {
      if (columns == null) {
        return;
      }
      Cell[] cells = columns.rawCells();
      if (cells == null || cells.length == 0) {
        return;
      }
      Set<String> files = new HashSet<>();
      long count = 0;
      long size = 0;
      for (Cell c : cells) {
        if (MobUtils.hasValidMobRefCellValue(c)) {
          // TODO confirm there aren't tags
          String fileName = MobUtils.getMobFileName(c);
          if (!files.contains(fileName)) {
            context.write(new Text(fileName), r);
            files.add(fileName);
          }
          final int cellsize = MobUtils.getMobValueLength(c);
          context
            .getCounter("SIZES OF CELLS",
              "Number of cells with size in the " + log10GroupedString(cellsize) + "s of bytes")
            .increment(1L);
          size += cellsize;
          count++;
        } else {
          LOG.debug("cell is not a mob ref, even though we asked for only refs. cell={}", c);
        }
      }
      context.getCounter("CELLS PER ROW",
        "Number of rows with " + log10GroupedString(count) + "s of cells per row").increment(1L);
      context
        .getCounter("SIZES OF ROWS",
          "Number of rows with total size in the " + log10GroupedString(size) + "s of bytes")
        .increment(1L);
      context.getCounter("MOB", "NUM_CELLS").increment(count);
    }
  }

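  /**
   * For each referenced mob file, checks the places the MOB system can resolve it from (the active
   * mob area, the archive, and hfilelinks) and writes one classification line per file, appending
   * the base64-encoded row keys that reference a file whenever a problem is found.
   */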
  public static class MobRefReducer extends Reducer<Text, ImmutableBytesWritable, Text, Text> {

    TableName table;
    String mobRegion;
    Path mob;
    Path archive;
    String separator;

    /* Results that mean things are fine */
    final Text OK_MOB_DIR = new Text("MOB DIR");
    final Text OK_HLINK_RESTORE = new Text("HLINK TO ARCHIVE FOR SAME TABLE");
    final Text OK_HLINK_CLONE = new Text("HLINK TO ARCHIVE FOR OTHER TABLE");
    /* Results that mean something is incorrect */
    final Text INCONSISTENT_ARCHIVE_BAD_LINK =
      new Text("ARCHIVE WITH HLINK BUT NOT FROM OUR TABLE");
    final Text INCONSISTENT_ARCHIVE_STALE = new Text("ARCHIVE BUT NO HLINKS");
    final Text INCONSISTENT_ARCHIVE_IOE = new Text("ARCHIVE BUT FAILURE WHILE CHECKING HLINKS");
    /* Results that mean data is probably already gone */
    final Text DATALOSS_MISSING = new Text("MISSING FILE");
    final Text DATALOSS_HLINK_DANGLING = new Text("HLINK BUT POINTS TO MISSING FILE");
    final Text DATALOSS_MISSING_IOE = new Text("MISSING FILE BUT FAILURE WHILE CHECKING HLINKS");
    final Base64.Encoder base64 = Base64.getEncoder();

    @Override
    public void setup(Context context) throws IOException, InterruptedException {
      final Configuration conf = context.getConfiguration();
      final String tableName = conf.get(TableInputFormat.INPUT_TABLE);
      if (null == tableName) {
        throw new IOException("Job configuration did not include table.");
      }
      table = TableName.valueOf(tableName);
      mobRegion = MobUtils.getMobRegionInfo(table).getEncodedName();
      final String family = conf.get(TableInputFormat.SCAN_COLUMN_FAMILY);
      if (null == family) {
        throw new IOException("Job configuration did not include column family");
      }
      mob = MobUtils.getMobFamilyPath(conf, table, family);
      LOG.info("Using active mob area '{}'", mob);
      archive = HFileArchiveUtil.getStoreArchivePath(conf, table, mobRegion, family);
      LOG.info("Using archive mob area '{}'", archive);
      // note: the Hadoop constant itself is spelled SEPERATOR
      separator = conf.get(TextOutputFormat.SEPERATOR, "\t");
    }

    @Override
    public void reduce(Text key, Iterable<ImmutableBytesWritable> rows, Context context)
      throws IOException, InterruptedException {
      final Configuration conf = context.getConfiguration();
      final String file = key.toString();
      // active mob area
      if (mob.getFileSystem(conf).exists(new Path(mob, file))) {
        LOG.debug("Found file '{}' in mob area", file);
        context.write(OK_MOB_DIR, key);
        // archive area - is there an hlink back reference (from a snapshot from same table)
      } else if (archive.getFileSystem(conf).exists(new Path(archive, file))) {

        Path backRefDir = HFileLink.getBackReferencesDir(archive, file);
        try {
          FileStatus[] backRefs = CommonFSUtils.listStatus(archive.getFileSystem(conf), backRefDir);
          if (backRefs != null) {
            boolean found = false;
            for (FileStatus backRef : backRefs) {
              Pair<TableName, String> refParts =
                HFileLink.parseBackReferenceName(backRef.getPath().getName());
              if (table.equals(refParts.getFirst()) && mobRegion.equals(refParts.getSecond())) {
                Path hlinkPath =
                  HFileLink.getHFileFromBackReference(MobUtils.getMobHome(conf), backRef.getPath());
                if (hlinkPath.getFileSystem(conf).exists(hlinkPath)) {
                  found = true;
                } else {
                  LOG.warn(
                    "Found file '{}' in archive area with a back reference to the mob area "
                      + "for our table, but the mob area does not have a corresponding hfilelink.",
                    file);
                }
              }
            }
            if (found) {
              LOG.debug("Found file '{}' in archive area. It has proper hlink back references to "
                + "suggest it is from a restored snapshot for this table.", file);
              context.write(OK_HLINK_RESTORE, key);
            } else {
              LOG.warn("Found file '{}' in archive area, but the hlink back references do not "
                + "properly point to the mob area for our table.", file);
              context.write(INCONSISTENT_ARCHIVE_BAD_LINK, encodeRows(context, key, rows));
            }
          } else {
            LOG.warn("Found file '{}' in archive area, but there are no hlinks pointing to it. "
              + "Perhaps a snapshot that has not been used yet, or an error.", file);
            context.write(INCONSISTENT_ARCHIVE_STALE, encodeRows(context, key, rows));
          }
        } catch (IOException e) {
          LOG.warn("Found file '{}' in archive area, but got an error while checking "
            + "its back references.", file, e);
          context.write(INCONSISTENT_ARCHIVE_IOE, encodeRows(context, key, rows));
        }

      } else {
        // check for an hlink in the active mob area (from a snapshot of a different table)
        try {
          /**
           * we are doing this ourselves instead of using FSUtils.getReferenceFilePaths because we
           * know the mob region never splits, so we can only have HFileLink references and looking
           * for just them is cheaper than listing everything. This glob should match the naming
           * convention for HFileLinks to our referenced hfile. As a simplified explanation, those
           * file names look like "table=region-hfile". For details see the
           * {@link HFileLink#createHFileLinkName HFileLink implementation}.
           */
          FileStatus[] hlinks = mob.getFileSystem(conf).globStatus(new Path(mob + "/*=*-" + file));
          if (hlinks != null && hlinks.length != 0) {
            if (hlinks.length != 1) {
              LOG.warn("Found file '{}' as hfilelinks in the mob area, but there is more than "
                + "one: {}", file, Arrays.deepToString(hlinks));
            }
            HFileLink found = null;
            for (FileStatus hlink : hlinks) {
              HFileLink tmp = HFileLink.buildFromHFileLinkPattern(conf, hlink.getPath());
              if (tmp.exists(archive.getFileSystem(conf))) {
                found = tmp;
                break;
              } else {
                LOG.debug("Target file does not exist for ref {}", tmp);
              }
            }
            if (found != null) {
              LOG.debug("Found file '{}' as a ref in the mob area: {}", file, found);
              context.write(OK_HLINK_CLONE, key);
            } else {
              LOG.warn("Found file '{}' as ref(s) in the mob area but they do not point to an hfile"
                + " that exists.", file);
              context.write(DATALOSS_HLINK_DANGLING, encodeRows(context, key, rows));
            }
          } else {
            LOG.error("Could not find referenced file '{}'. See the docs on this tool.", file);
            LOG.debug("Note that we don't have the server-side tag from the mob cells that says "
              + "what table the reference is originally from. So if the HFileLink in this table "
              + "is missing but the referenced file is still in the table from that tag, then "
              + "lookups of these impacted rows will work. Do a scan of the reference details "
              + "of the cell for the hfile name and then check the entire hbase install if this "
              + "table was made from a snapshot of another table. See the ref guide section on "
              + "mob for details.");
            context.write(DATALOSS_MISSING, encodeRows(context, key, rows));
          }
        } catch (IOException e) {
          LOG.error(
            "Exception while checking mob area of our table for HFileLinks that point to {}", file,
            e);
          context.write(DATALOSS_MISSING_IOE, encodeRows(context, key, rows));
        }
      }
    }

    /**
     * Reuses the passed Text key. Appends the configured separator and then a comma separated
     * list of base64 encoded row keys.
     */
    private Text encodeRows(Context context, Text key, Iterable<ImmutableBytesWritable> rows)
      throws IOException {
      StringBuilder sb = new StringBuilder(key.toString());
      sb.append(separator);
      boolean moreThanOne = false;
      long count = 0;
      for (ImmutableBytesWritable row : rows) {
        if (moreThanOne) {
          sb.append(",");
        }
        sb.append(base64.encodeToString(row.copyBytes()));
        moreThanOne = true;
        count++;
      }
      context.getCounter("PROBLEM", "Problem MOB files").increment(1L);
      context.getCounter("PROBLEM", "Affected rows").increment(count);
      context
        .getCounter("ROWS WITH PROBLEMS PER FILE",
          "Number of HFiles with " + log10GroupedString(count) + "s of affected rows")
        .increment(1L);
      key.set(sb.toString());
      return key;
    }
  }

  /**
   * Returns the string representation of the given number after grouping it into log10 buckets.
   * e.g. 1-9 -> 1, 10-99 -> 10, ..., 100,000-999,999 -> 100,000, etc. (An input of 0 falls
   * through to "0", since log10(0) is negative infinity.)
   */
  static String log10GroupedString(long number) {
    return String.format("%,d", (long) (Math.pow(10d, Math.floor(Math.log10(number)))));
  }
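
  // Worked example: log10GroupedString(52364) returns "10,000", since floor(log10(52364)) = 4 and
  // 10^4 rendered with "%,d" grouping is "10,000"; a cell of that size lands in the
  // "Number of cells with size in the 10,000s of bytes" bucket. log10GroupedString(7) returns "1".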

  /**
   * Main method for the tool.
   * @return 0 if success, 1 for bad args. 2 if job aborted with an exception, 3 if mr job was
   *         unsuccessful
   */
  @Override
  public int run(String[] args) throws IOException, InterruptedException {
    // TODO make family and table optional
    if (args.length != 3) {
      printUsage();
      return 1;
    }
    final String output = args[0];
    final String tableName = args[1];
    final String familyName = args[2];
    final long reportStartTime = EnvironmentEdgeManager.currentTime();
    Configuration conf = getConf();
    try {
      FileSystem fs = FileSystem.get(conf);
      // check whether the current user is the same one as the owner of hbase root
      String currentUserName = UserGroupInformation.getCurrentUser().getShortUserName();
      FileStatus[] hbaseRootFileStat = fs.listStatus(new Path(conf.get(HConstants.HBASE_DIR)));
      if (hbaseRootFileStat.length > 0) {
        String owner = hbaseRootFileStat[0].getOwner();
        if (!owner.equals(currentUserName)) {
          String errorMsg =
            "The current user[" + currentUserName + "] does not have hbase root credentials."
              + " If this job fails due to an inability to read HBase's internal directories, "
              + "you will need to rerun as a user with sufficient permissions. The HBase superuser "
              + "is a safe choice.";
          LOG.warn(errorMsg);
        }
      } else {
        LOG.error("The passed configs point to an HBase dir that does not exist: {}",
          conf.get(HConstants.HBASE_DIR));
        throw new IOException("The target HBase does not exist");
      }

      byte[] family;
      int maxVersions;
      TableName tn = TableName.valueOf(tableName);
      try (Connection connection = ConnectionFactory.createConnection(conf);
        Admin admin = connection.getAdmin()) {
        TableDescriptor htd = admin.getDescriptor(tn);
        ColumnFamilyDescriptor hcd = htd.getColumnFamily(Bytes.toBytes(familyName));
        if (hcd == null || !hcd.isMobEnabled()) {
          throw new IOException("Column family " + familyName + " is not a MOB column family");
        }
        family = hcd.getName();
        maxVersions = hcd.getMaxVersions();
      }

      String id = getClass().getSimpleName() + UUID.randomUUID().toString().replace("-", "");
      Scan scan = new Scan();
      scan.addFamily(family);
      // Do not retrieve the mob data when scanning
      scan.setAttribute(MobConstants.MOB_SCAN_RAW, Bytes.toBytes(Boolean.TRUE));
      scan.setAttribute(MobConstants.MOB_SCAN_REF_ONLY, Bytes.toBytes(Boolean.TRUE));
      // If a scanner caching value isn't set, pick a smaller default since we know we're doing
      // a full table scan and don't want to impact other clients badly.
      scan.setCaching(conf.getInt(HConstants.HBASE_CLIENT_SCANNER_CACHING, 10000));
      scan.setCacheBlocks(false);
      scan.readVersions(maxVersions);
      conf.set(REPORT_JOB_ID, id);

      Job job = Job.getInstance(conf);
      job.setJarByClass(getClass());
      TableMapReduceUtil.initTableMapperJob(tn, scan, MobRefMapper.class, Text.class,
        ImmutableBytesWritable.class, job);

      job.setReducerClass(MobRefReducer.class);
      job.setOutputFormatClass(TextOutputFormat.class);
      TextOutputFormat.setOutputPath(job, new Path(output));

      job.setJobName(getClass().getSimpleName() + "-" + tn + "-" + familyName);
      // for use in the reducer. easier than re-parsing it out of the scan string.
      job.getConfiguration().set(TableInputFormat.SCAN_COLUMN_FAMILY, familyName);

      // Used when we start this job as the base point for file "recency".
      job.getConfiguration().setLong(REPORT_START_DATETIME, reportStartTime);

      if (job.waitForCompletion(true)) {
        LOG.info("Finished creating report for '{}', family='{}'", tn, familyName);
      } else {
        System.err.println("Job was not successful");
        return 3;
      }
      return 0;

    } catch (ClassNotFoundException | RuntimeException | IOException | InterruptedException e) {
      System.err.println("Job aborted due to exception " + e);
      return 2; // job failed
    }
  }

  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    int ret = ToolRunner.run(conf, new MobRefReporter(), args);
    System.exit(ret);
  }

  private void printUsage() {
    System.err.println("Usage:\n" + "--------------------------\n" + MobRefReporter.class.getName()
      + " output-dir tableName familyName");
    System.err.println(" output-dir       Where to write output report.");
    System.err.println(" tableName        The table name");
    System.err.println(" familyName       The column family name");
  }

}