001/**
002 *
003 * Licensed to the Apache Software Foundation (ASF) under one
004 * or more contributor license agreements.  See the NOTICE file
005 * distributed with this work for additional information
006 * regarding copyright ownership.  The ASF licenses this file
007 * to you under the Apache License, Version 2.0 (the
008 * "License"); you may not use this file except in compliance
009 * with the License.  You may obtain a copy of the License at
010 *
011 *     http://www.apache.org/licenses/LICENSE-2.0
012 *
013 * Unless required by applicable law or agreed to in writing, software
014 * distributed under the License is distributed on an "AS IS" BASIS,
015 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
016 * See the License for the specific language governing permissions and
017 * limitations under the License.
018 */
019package org.apache.hadoop.hbase.util;
020
021import static org.junit.Assert.assertEquals;
022import static org.junit.Assert.fail;
023
024import java.io.IOException;
025import java.util.ArrayList;
026import java.util.Collection;
027import java.util.EnumSet;
028import java.util.HashMap;
029import java.util.List;
030import java.util.Map;
031import java.util.Optional;
032import java.util.concurrent.CountDownLatch;
033import java.util.concurrent.ExecutorService;
034import java.util.concurrent.ScheduledThreadPoolExecutor;
035import org.apache.hadoop.conf.Configuration;
036import org.apache.hadoop.fs.FileStatus;
037import org.apache.hadoop.fs.FileSystem;
038import org.apache.hadoop.fs.Path;
039import org.apache.hadoop.hbase.ClusterMetrics;
040import org.apache.hadoop.hbase.ClusterMetrics.Option;
041import org.apache.hadoop.hbase.HBaseTestingUtil;
042import org.apache.hadoop.hbase.HConstants;
043import org.apache.hadoop.hbase.HRegionLocation;
044import org.apache.hadoop.hbase.MetaTableAccessor;
045import org.apache.hadoop.hbase.ServerName;
046import org.apache.hadoop.hbase.TableName;
047import org.apache.hadoop.hbase.client.Admin;
048import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
049import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
050import org.apache.hadoop.hbase.client.Connection;
051import org.apache.hadoop.hbase.client.ConnectionFactory;
052import org.apache.hadoop.hbase.client.Delete;
053import org.apache.hadoop.hbase.client.Put;
054import org.apache.hadoop.hbase.client.RegionInfo;
055import org.apache.hadoop.hbase.client.RegionLocator;
056import org.apache.hadoop.hbase.client.Scan;
057import org.apache.hadoop.hbase.client.Table;
058import org.apache.hadoop.hbase.client.TableDescriptor;
059import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
060import org.apache.hadoop.hbase.coprocessor.MasterCoprocessor;
061import org.apache.hadoop.hbase.coprocessor.MasterCoprocessorEnvironment;
062import org.apache.hadoop.hbase.coprocessor.MasterObserver;
063import org.apache.hadoop.hbase.coprocessor.ObserverContext;
064import org.apache.hadoop.hbase.master.assignment.AssignmentManager;
065import org.apache.hadoop.hbase.master.assignment.RegionStates;
066import org.apache.hadoop.hbase.mob.MobFileName;
067import org.apache.hadoop.hbase.mob.MobUtils;
068import org.apache.hadoop.hbase.regionserver.HRegionFileSystem;
069import org.apache.hadoop.hbase.util.hbck.HFileCorruptionChecker;
070import org.apache.zookeeper.KeeperException;
071import org.junit.rules.TestName;
072import org.slf4j.Logger;
073import org.slf4j.LoggerFactory;
074
075/**
076 * This is the base class for  HBaseFsck's ability to detect reasons for inconsistent tables.
077 *
078 * Actual tests are in :
079 * TestHBaseFsckTwoRS
080 * TestHBaseFsckOneRS
081 * TestHBaseFsckMOB
082 * TestHBaseFsckReplicas
083 */
public class BaseTestHBaseFsck {
  // Size of the thread pool used by subclasses when running hbck in parallel.
  static final int POOL_SIZE = 7;
  protected static final Logger LOG = LoggerFactory.getLogger(BaseTestHBaseFsck.class);
  protected final static HBaseTestingUtil TEST_UTIL = new HBaseTestingUtil();
  protected final static Configuration conf = TEST_UTIL.getConfiguration();
  // Single column family used by every table these tests create.
  protected final static String FAM_STR = "fam";
  protected final static byte[] FAM = Bytes.toBytes(FAM_STR);
  // Milliseconds to wait for a region to come online in subclass tests.
  protected final static int REGION_ONLINE_TIMEOUT = 800;
  // Cluster-level handles, initialized once by subclass setup and shared across tests.
  protected static AssignmentManager assignmentManager;
  protected static RegionStates regionStates;
  protected static ExecutorService tableExecutorService;
  protected static ScheduledThreadPoolExecutor hbfsckExecutorService;
  protected static Connection connection;
  protected static Admin admin;

  // for the instance, reset every test run
  protected Table tbl;
  // Split keys producing four regions: (-inf,A), [A,B), [B,C), [C,+inf).
  protected final static byte[][] SPLITS = new byte[][] { Bytes.toBytes("A"),
    Bytes.toBytes("B"), Bytes.toBytes("C") };
  // one row per region.
  protected final static byte[][] ROWKEYS= new byte[][] {
    Bytes.toBytes("00"), Bytes.toBytes("50"), Bytes.toBytes("A0"), Bytes.toBytes("A5"),
    Bytes.toBytes("B0"), Bytes.toBytes("B5"), Bytes.toBytes("C0"), Bytes.toBytes("C5") };
107
108  /**
109   * Debugging method to dump the contents of meta.
110   */
111  protected void dumpMeta(TableName tableName) throws IOException {
112    List<RegionInfo> regions =
113      MetaTableAccessor.getTableRegions(TEST_UTIL.getConnection(), tableName);
114    for (RegionInfo region : regions) {
115      LOG.info(region.getRegionNameAsString());
116    }
117  }
118
119  /**
120   * This method is used to undeploy a region -- close it and attempt to
121   * remove its state from the Master.
122   */
123  protected void undeployRegion(Connection conn, ServerName sn,
124      RegionInfo hri) throws IOException, InterruptedException {
125    try {
126      HBaseFsckRepair.closeRegionSilentlyAndWait(conn, sn, hri);
127      if (!hri.isMetaRegion()) {
128        admin.offline(hri.getRegionName());
129      }
130    } catch (IOException ioe) {
131      LOG.warn("Got exception when attempting to offline region "
132          + Bytes.toString(hri.getRegionName()), ioe);
133    }
134  }
135  /**
136   * Delete a region from assignments, meta, or completely from hdfs.
137   * @param unassign if true unassign region if assigned
138   * @param metaRow  if true remove region's row from META
139   * @param hdfs if true remove region's dir in HDFS
140   */
141  protected void deleteRegion(Configuration conf, final TableDescriptor htd,
142      byte[] startKey, byte[] endKey, boolean unassign, boolean metaRow,
143      boolean hdfs) throws IOException, InterruptedException {
144    deleteRegion(conf, htd, startKey, endKey, unassign, metaRow, hdfs, false,
145        RegionInfo.DEFAULT_REPLICA_ID);
146  }
147
148  /**
149   * Delete a region from assignments, meta, or completely from hdfs.
150   * @param unassign if true unassign region if assigned
151   * @param metaRow  if true remove region's row from META
152   * @param hdfs if true remove region's dir in HDFS
153   * @param regionInfoOnly if true remove a region dir's .regioninfo file
154   * @param replicaId replica id
155   */
156  protected void deleteRegion(Configuration conf, final TableDescriptor htd,
157      byte[] startKey, byte[] endKey, boolean unassign, boolean metaRow,
158      boolean hdfs, boolean regionInfoOnly, int replicaId)
159          throws IOException, InterruptedException {
160    LOG.info("** Before delete:");
161    dumpMeta(htd.getTableName());
162
163    List<HRegionLocation> locations;
164    try(RegionLocator rl = connection.getRegionLocator(tbl.getName())) {
165      locations = rl.getAllRegionLocations();
166    }
167
168    for (HRegionLocation location : locations) {
169      RegionInfo hri = location.getRegion();
170      ServerName hsa = location.getServerName();
171      if (Bytes.compareTo(hri.getStartKey(), startKey) == 0
172          && Bytes.compareTo(hri.getEndKey(), endKey) == 0
173          && hri.getReplicaId() == replicaId) {
174
175        LOG.info("RegionName: " +hri.getRegionNameAsString());
176        byte[] deleteRow = hri.getRegionName();
177
178        if (unassign) {
179          LOG.info("Undeploying region " + hri + " from server " + hsa);
180          undeployRegion(connection, hsa, hri);
181        }
182
183        if (regionInfoOnly) {
184          LOG.info("deleting hdfs .regioninfo data: " + hri.toString() + hsa.toString());
185          Path rootDir = CommonFSUtils.getRootDir(conf);
186          FileSystem fs = rootDir.getFileSystem(conf);
187          Path p = new Path(CommonFSUtils.getTableDir(rootDir, htd.getTableName()),
188              hri.getEncodedName());
189          Path hriPath = new Path(p, HRegionFileSystem.REGION_INFO_FILE);
190          fs.delete(hriPath, true);
191        }
192
193        if (hdfs) {
194          LOG.info("deleting hdfs data: " + hri.toString() + hsa.toString());
195          Path rootDir = CommonFSUtils.getRootDir(conf);
196          FileSystem fs = rootDir.getFileSystem(conf);
197          Path p = new Path(CommonFSUtils.getTableDir(rootDir, htd.getTableName()),
198              hri.getEncodedName());
199          HBaseFsck.debugLsr(conf, p);
200          boolean success = fs.delete(p, true);
201          LOG.info("Deleted " + p + " sucessfully? " + success);
202          HBaseFsck.debugLsr(conf, p);
203        }
204
205        if (metaRow) {
206          try (Table meta = connection.getTable(TableName.META_TABLE_NAME, tableExecutorService)) {
207            Delete delete = new Delete(deleteRow);
208            meta.delete(delete);
209          }
210        }
211      }
212      LOG.info(hri.toString() + hsa.toString());
213    }
214
215    LOG.info("*** After delete:");
216    dumpMeta(htd.getTableName());
217  }
218
219  /**
220   * Setup a clean table before we start mucking with it.
221   *
222   * It will set tbl which needs to be closed after test
223   *
224   * @throws IOException
225   * @throws InterruptedException
226   * @throws KeeperException
227   */
228  void setupTable(TableName tablename) throws Exception {
229    setupTableWithRegionReplica(tablename, 1);
230  }
231
232  /**
233   * Setup a clean table with a certain region_replica count
234   *
235   * It will set tbl which needs to be closed after test
236   *
237   * @throws Exception
238   */
239  void setupTableWithRegionReplica(TableName tablename, int replicaCount) throws Exception {
240    TableDescriptorBuilder tableDescriptorBuilder =
241      TableDescriptorBuilder.newBuilder(tablename);
242    ColumnFamilyDescriptor columnFamilyDescriptor =
243      ColumnFamilyDescriptorBuilder.newBuilder(FAM).build();
244    tableDescriptorBuilder.setRegionReplication(replicaCount);
245    // If a table has no CF's it doesn't get checked
246    tableDescriptorBuilder.setColumnFamily(columnFamilyDescriptor);
247    createTable(TEST_UTIL, tableDescriptorBuilder.build(), SPLITS);
248
249    tbl = connection.getTable(tablename, tableExecutorService);
250    List<Put> puts = new ArrayList<>(ROWKEYS.length);
251    for (byte[] row : ROWKEYS) {
252      Put p = new Put(row);
253      p.addColumn(FAM, Bytes.toBytes("val"), row);
254      puts.add(p);
255    }
256    tbl.put(puts);
257  }
258
259  /**
260   * Setup a clean table with a mob-enabled column.
261   *
262   * @param tablename The name of a table to be created.
263   * @throws Exception
264   */
265  void setupMobTable(TableName tablename) throws Exception {
266    TableDescriptorBuilder tableDescriptorBuilder =
267      TableDescriptorBuilder.newBuilder(tablename);
268    ColumnFamilyDescriptor columnFamilyDescriptor =
269      ColumnFamilyDescriptorBuilder
270        .newBuilder(FAM)
271        .setMobEnabled(true)
272        .setMobThreshold(0).build();
273    // If a table has no CF's it doesn't get checked
274    tableDescriptorBuilder.setColumnFamily(columnFamilyDescriptor);
275    createTable(TEST_UTIL, tableDescriptorBuilder.build(), SPLITS);
276
277    tbl = connection.getTable(tablename, tableExecutorService);
278    List<Put> puts = new ArrayList<>(ROWKEYS.length);
279    for (byte[] row : ROWKEYS) {
280      Put p = new Put(row);
281      p.addColumn(FAM, Bytes.toBytes("val"), row);
282      puts.add(p);
283    }
284    tbl.put(puts);
285  }
286
287  /**
288   * Counts the number of rows to verify data loss or non-dataloss.
289   */
290  int countRows() throws IOException {
291     return TEST_UTIL.countRows(tbl);
292  }
293
294  /**
295   * Counts the number of rows to verify data loss or non-dataloss.
296   */
297  int countRows(byte[] start, byte[] end) throws IOException {
298    return TEST_UTIL.countRows(tbl, new Scan().withStartRow(start).withStopRow(end));
299  }
300
301  /**
302   * delete table in preparation for next test
303   */
304  void cleanupTable(TableName tablename) throws Exception {
305    if (tbl != null) {
306      tbl.close();
307      tbl = null;
308    }
309    connection.clearRegionLocationCache();
310    deleteTable(TEST_UTIL, tablename);
311  }
312
313  /**
314   * Get region info from local cluster.
315   */
316  Map<ServerName, List<String>> getDeployedHRIs(final Admin admin) throws IOException {
317    ClusterMetrics status = admin.getClusterMetrics(EnumSet.of(Option.LIVE_SERVERS));
318    Collection<ServerName> regionServers = status.getLiveServerMetrics().keySet();
319    Map<ServerName, List<String>> mm = new HashMap<>();
320    for (ServerName hsi : regionServers) {
321      // list all online regions from this region server
322      List<RegionInfo> regions = admin.getRegions(hsi);
323      List<String> regionNames = new ArrayList<>(regions.size());
324      for (RegionInfo hri : regions) {
325        regionNames.add(hri.getRegionNameAsString());
326      }
327      mm.put(hsi, regionNames);
328    }
329    return mm;
330  }
331
332  /**
333   * Returns the HSI a region info is on.
334   */
335  ServerName findDeployedHSI(Map<ServerName, List<String>> mm, RegionInfo hri) {
336    for (Map.Entry<ServerName,List <String>> e : mm.entrySet()) {
337      if (e.getValue().contains(hri.getRegionNameAsString())) {
338        return e.getKey();
339      }
340    }
341    return null;
342  }
343
344  public void deleteTableDir(TableName table) throws IOException {
345    Path rootDir = CommonFSUtils.getRootDir(conf);
346    FileSystem fs = rootDir.getFileSystem(conf);
347    Path p = CommonFSUtils.getTableDir(rootDir, table);
348    HBaseFsck.debugLsr(conf, p);
349    boolean success = fs.delete(p, true);
350    LOG.info("Deleted " + p + " sucessfully? " + success);
351  }
352
353  /**
354   * We don't have an easy way to verify that a flush completed, so we loop until we find a
355   * legitimate hfile and return it.
356   * @param fs
357   * @param table
358   * @return Path of a flushed hfile.
359   * @throws IOException
360   */
361  Path getFlushedHFile(FileSystem fs, TableName table) throws IOException {
362    Path tableDir= CommonFSUtils.getTableDir(CommonFSUtils.getRootDir(conf), table);
363    Path regionDir = FSUtils.getRegionDirs(fs, tableDir).get(0);
364    Path famDir = new Path(regionDir, FAM_STR);
365
366    // keep doing this until we get a legit hfile
367    while (true) {
368      FileStatus[] hfFss = fs.listStatus(famDir);
369      if (hfFss.length == 0) {
370        continue;
371      }
372      for (FileStatus hfs : hfFss) {
373        if (!hfs.isDirectory()) {
374          return hfs.getPath();
375        }
376      }
377    }
378  }
379
380  /**
381   * Gets flushed mob files.
382   * @param fs The current file system.
383   * @param table The current table name.
384   * @return Path of a flushed hfile.
385   * @throws IOException
386   */
387  Path getFlushedMobFile(FileSystem fs, TableName table) throws IOException {
388    Path famDir = MobUtils.getMobFamilyPath(conf, table, FAM_STR);
389
390    // keep doing this until we get a legit hfile
391    while (true) {
392      FileStatus[] hfFss = fs.listStatus(famDir);
393      if (hfFss.length == 0) {
394        continue;
395      }
396      for (FileStatus hfs : hfFss) {
397        if (!hfs.isDirectory()) {
398          return hfs.getPath();
399        }
400      }
401    }
402  }
403
404  /**
405   * Creates a new mob file name by the old one.
406   * @param oldFileName The old mob file name.
407   * @return The new mob file name.
408   */
409  String createMobFileName(String oldFileName) {
410    MobFileName mobFileName = MobFileName.create(oldFileName);
411    String startKey = mobFileName.getStartKey();
412    String date = mobFileName.getDate();
413    return MobFileName.create(startKey, date,
414                              TEST_UTIL.getRandomUUID().toString().replaceAll("-", ""), "abcdef")
415      .getFileName();
416  }
417
418
419
420
421  /**
422   * Test that use this should have a timeout, because this method could potentially wait forever.
423  */
424  protected void doQuarantineTest(TableName table, HBaseFsck hbck, int check,
425                                  int corrupt, int fail, int quar, int missing) throws Exception {
426    try {
427      setupTable(table);
428      assertEquals(ROWKEYS.length, countRows());
429      admin.flush(table); // flush is async.
430
431      // Mess it up by leaving a hole in the assignment, meta, and hdfs data
432      admin.disableTable(table);
433
434      String[] args = {"-sidelineCorruptHFiles", "-repairHoles", "-ignorePreCheckPermission",
435          table.getNameAsString()};
436      HBaseFsck res = hbck.exec(hbfsckExecutorService, args);
437
438      HFileCorruptionChecker hfcc = res.getHFilecorruptionChecker();
439      assertEquals(hfcc.getHFilesChecked(), check);
440      assertEquals(hfcc.getCorrupted().size(), corrupt);
441      assertEquals(hfcc.getFailures().size(), fail);
442      assertEquals(hfcc.getQuarantined().size(), quar);
443      assertEquals(hfcc.getMissing().size(), missing);
444
445      // its been fixed, verify that we can enable
446      admin.enableTableAsync(table);
447      while (!admin.isTableEnabled(table)) {
448        try {
449          Thread.sleep(250);
450        } catch (InterruptedException e) {
451          e.printStackTrace();
452          fail("Interrupted when trying to enable table " + table);
453        }
454      }
455    } finally {
456      cleanupTable(table);
457    }
458  }
459
460
  /**
   * A stub {@link HbckErrorReporter} that records how many times any reporting method was
   * invoked. Every override simply bumps the shared static counter; tests inspect
   * {@link #calledCount} to verify that hbck exercised its error-reporting paths.
   */
  static class MockErrorReporter implements HbckErrorReporter {
    // Shared across instances; tests read/reset this between runs.
    static int calledCount = 0;

    @Override
    public void clear() {
      calledCount++;
    }

    @Override
    public void report(String message) {
      calledCount++;
    }

    @Override
    public void reportError(String message) {
      calledCount++;
    }

    @Override
    public void reportError(ERROR_CODE errorCode, String message) {
      calledCount++;
    }

    @Override
    public void reportError(ERROR_CODE errorCode, String message, HbckTableInfo table) {
      calledCount++;
    }

    @Override
    public void reportError(ERROR_CODE errorCode,
        String message, HbckTableInfo table, HbckRegionInfo info) {
      calledCount++;
    }

    @Override
    public void reportError(ERROR_CODE errorCode, String message,
        HbckTableInfo table, HbckRegionInfo info1, HbckRegionInfo info2) {
      calledCount++;
    }

    @Override
    public int summarize() {
      // Increments and returns the counter (pre-increment, so the new value).
      return ++calledCount;
    }

    @Override
    public void detail(String details) {
      calledCount++;
    }

    @Override
    public ArrayList<ERROR_CODE> getErrorList() {
      calledCount++;
      // Always reports an empty error list.
      return new ArrayList<>();
    }

    @Override
    public void progress() {
      calledCount++;
    }

    @Override
    public void print(String message) {
      calledCount++;
    }

    @Override
    public void resetErrors() {
      calledCount++;
    }

    @Override
    public boolean tableHasErrors(HbckTableInfo table) {
      calledCount++;
      return false;
    }
  }
538
539
540  protected void deleteMetaRegion(Configuration conf, boolean unassign, boolean hdfs,
541                                  boolean regionInfoOnly) throws IOException, InterruptedException {
542    HRegionLocation metaLocation = connection.getRegionLocator(TableName.META_TABLE_NAME)
543        .getRegionLocation(HConstants.EMPTY_START_ROW);
544    ServerName hsa = metaLocation.getServerName();
545    RegionInfo hri = metaLocation.getRegion();
546    if (unassign) {
547      LOG.info("Undeploying meta region " + hri + " from server " + hsa);
548      try (Connection unmanagedConnection = ConnectionFactory.createConnection(conf)) {
549        undeployRegion(unmanagedConnection, hsa, hri);
550      }
551    }
552
553    if (regionInfoOnly) {
554      LOG.info("deleting hdfs .regioninfo data: " + hri.toString() + hsa.toString());
555      Path rootDir = CommonFSUtils.getRootDir(conf);
556      FileSystem fs = rootDir.getFileSystem(conf);
557      Path p = new Path(rootDir + "/" + TableName.META_TABLE_NAME.getNameAsString(),
558          hri.getEncodedName());
559      Path hriPath = new Path(p, HRegionFileSystem.REGION_INFO_FILE);
560      fs.delete(hriPath, true);
561    }
562
563    if (hdfs) {
564      LOG.info("deleting hdfs data: " + hri.toString() + hsa.toString());
565      Path rootDir = CommonFSUtils.getRootDir(conf);
566      FileSystem fs = rootDir.getFileSystem(conf);
567      Path p = new Path(rootDir + "/" + TableName.META_TABLE_NAME.getNameAsString(),
568          hri.getEncodedName());
569      HBaseFsck.debugLsr(conf, p);
570      boolean success = fs.delete(p, true);
571      LOG.info("Deleted " + p + " sucessfully? " + success);
572      HBaseFsck.debugLsr(conf, p);
573    }
574  }
575
  // JUnit rule exposing the currently-running test's method name to subclasses.
  @org.junit.Rule
  public TestName name = new TestName();
578
579  public static class MasterSyncCoprocessor implements MasterCoprocessor, MasterObserver {
580    volatile CountDownLatch tableCreationLatch = null;
581    volatile CountDownLatch tableDeletionLatch = null;
582
583    @Override
584    public Optional<MasterObserver> getMasterObserver() {
585      return Optional.of(this);
586    }
587
588    @Override
589    public void postCompletedCreateTableAction(
590        final ObserverContext<MasterCoprocessorEnvironment> ctx,
591        final TableDescriptor desc,
592        final RegionInfo[] regions) throws IOException {
593      // the AccessController test, some times calls only and directly the
594      // postCompletedCreateTableAction()
595      if (tableCreationLatch != null) {
596        tableCreationLatch.countDown();
597      }
598    }
599
600    @Override
601    public void postCompletedDeleteTableAction(
602        final ObserverContext<MasterCoprocessorEnvironment> ctx,
603        final TableName tableName) throws IOException {
604      // the AccessController test, some times calls only and directly the
605      // postCompletedDeleteTableAction()
606      if (tableDeletionLatch != null) {
607        tableDeletionLatch.countDown();
608      }
609    }
610  }
611
612  public static void createTable(HBaseTestingUtil testUtil, TableDescriptor tableDescriptor,
613      byte[][] splitKeys) throws Exception {
614    // NOTE: We need a latch because admin is not sync,
615    // so the postOp coprocessor method may be called after the admin operation returned.
616    MasterSyncCoprocessor coproc = testUtil.getHBaseCluster().getMaster()
617        .getMasterCoprocessorHost().findCoprocessor(MasterSyncCoprocessor.class);
618    coproc.tableCreationLatch = new CountDownLatch(1);
619    if (splitKeys != null) {
620      admin.createTable(tableDescriptor, splitKeys);
621    } else {
622      admin.createTable(tableDescriptor);
623    }
624    coproc.tableCreationLatch.await();
625    coproc.tableCreationLatch = null;
626    testUtil.waitUntilAllRegionsAssigned(tableDescriptor.getTableName());
627  }
628
629  public static void deleteTable(HBaseTestingUtil testUtil, TableName tableName)
630    throws Exception {
631    // NOTE: We need a latch because admin is not sync,
632    // so the postOp coprocessor method may be called after the admin operation returned.
633    MasterSyncCoprocessor coproc = testUtil.getHBaseCluster().getMaster()
634      .getMasterCoprocessorHost().findCoprocessor(MasterSyncCoprocessor.class);
635    coproc.tableDeletionLatch = new CountDownLatch(1);
636    try {
637      admin.disableTable(tableName);
638    } catch (Exception e) {
639      LOG.debug("Table: " + tableName + " already disabled, so just deleting it.");
640    }
641    admin.deleteTable(tableName);
642    coproc.tableDeletionLatch.await();
643    coproc.tableDeletionLatch = null;
644  }
645}