/**
 *
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.util;

import static org.junit.Assert.assertEquals;
import static org.junit.Assert.fail;

import java.io.IOException;
import java.util.ArrayList;
import java.util.Collection;
import java.util.EnumSet;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Optional;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.ScheduledThreadPoolExecutor;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.ClusterMetrics;
import org.apache.hadoop.hbase.ClusterMetrics.Option;
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HRegionLocation;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ClusterConnection;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Delete;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.RegionInfo;
import org.apache.hadoop.hbase.client.RegionLocator;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.coprocessor.MasterCoprocessor;
import org.apache.hadoop.hbase.coprocessor.MasterCoprocessorEnvironment;
import org.apache.hadoop.hbase.coprocessor.MasterObserver;
import org.apache.hadoop.hbase.coprocessor.ObserverContext;
import org.apache.hadoop.hbase.master.assignment.AssignmentManager;
import org.apache.hadoop.hbase.master.assignment.RegionStates;
import org.apache.hadoop.hbase.mob.MobFileName;
import org.apache.hadoop.hbase.mob.MobUtils;
import org.apache.hadoop.hbase.regionserver.HRegionFileSystem;
import org.apache.hadoop.hbase.util.HBaseFsck.ErrorReporter;
import org.apache.hadoop.hbase.util.HBaseFsck.HbckInfo;
import org.apache.hadoop.hbase.util.HBaseFsck.TableInfo;
import org.apache.hadoop.hbase.util.hbck.HFileCorruptionChecker;
import org.apache.zookeeper.KeeperException;
import org.junit.rules.TestName;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos;

/**
 * This is the base class for tests that exercise HBaseFsck's ability to detect reasons for
 * inconsistent tables.
 *
 * Actual tests are in:
 * TestHBaseFsckTwoRS
 * TestHBaseFsckOneRS
 * TestHBaseFsckMOB
 * TestHBaseFsckReplicas
 */
public class BaseTestHBaseFsck {
  static final int POOL_SIZE = 7;
  protected static final Logger LOG = LoggerFactory.getLogger(BaseTestHBaseFsck.class);
  protected final static HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
  protected final static Configuration conf = TEST_UTIL.getConfiguration();
  protected final static String FAM_STR = "fam";
  protected final static byte[] FAM = Bytes.toBytes(FAM_STR);
  protected final static int REGION_ONLINE_TIMEOUT = 800;
  protected static AssignmentManager assignmentManager;
  protected static RegionStates regionStates;
  protected static ExecutorService tableExecutorService;
  protected static ScheduledThreadPoolExecutor hbfsckExecutorService;
  protected static ClusterConnection connection;
  protected static Admin admin;

  // per-test instance state, reset on every test run
  protected Table tbl;
  protected final static byte[][] SPLITS = new byte[][] { Bytes.toBytes("A"),
    Bytes.toBytes("B"), Bytes.toBytes("C") };
  // one row per region.
  protected final static byte[][] ROWKEYS = new byte[][] {
    Bytes.toBytes("00"), Bytes.toBytes("50"), Bytes.toBytes("A0"), Bytes.toBytes("A5"),
    Bytes.toBytes("B0"), Bytes.toBytes("B5"), Bytes.toBytes("C0"), Bytes.toBytes("C5") };

  /**
   * Debugging method to dump the contents of meta.
   */
  protected void dumpMeta(TableName tableName) throws IOException {
    List<byte[]> metaRows = TEST_UTIL.getMetaTableRows(tableName);
    for (byte[] row : metaRows) {
      LOG.info(Bytes.toString(row));
    }
  }

  /**
   * This method is used to undeploy a region -- close it and attempt to
   * remove its state from the Master.
   */
  protected void undeployRegion(Connection conn, ServerName sn,
      RegionInfo hri) throws IOException, InterruptedException {
    try {
      HBaseFsckRepair.closeRegionSilentlyAndWait(conn, sn, hri);
      if (!hri.isMetaRegion()) {
        admin.offline(hri.getRegionName());
      }
    } catch (IOException ioe) {
      LOG.warn("Got exception when attempting to offline region "
          + Bytes.toString(hri.getRegionName()), ioe);
    }
  }

  /**
   * Delete a region from assignments, meta, or completely from hdfs.
   * @param unassign if true unassign region if assigned
   * @param metaRow  if true remove region's row from META
   * @param hdfs if true remove region's dir in HDFS
   */
  protected void deleteRegion(Configuration conf, final HTableDescriptor htd,
      byte[] startKey, byte[] endKey, boolean unassign, boolean metaRow,
      boolean hdfs) throws IOException, InterruptedException {
    deleteRegion(conf, htd, startKey, endKey, unassign, metaRow, hdfs, false,
        RegionInfo.DEFAULT_REPLICA_ID);
  }

  /**
   * Delete a region from assignments, meta, or completely from hdfs.
   * @param unassign if true unassign region if assigned
   * @param metaRow  if true remove region's row from META
   * @param hdfs if true remove region's dir in HDFS
   * @param regionInfoOnly if true remove a region dir's .regioninfo file
   * @param replicaId replica id
   */
  protected void deleteRegion(Configuration conf, final HTableDescriptor htd,
      byte[] startKey, byte[] endKey, boolean unassign, boolean metaRow,
      boolean hdfs, boolean regionInfoOnly, int replicaId)
          throws IOException, InterruptedException {
    LOG.info("** Before delete:");
    dumpMeta(htd.getTableName());

    List<HRegionLocation> locations;
    try (RegionLocator rl = connection.getRegionLocator(tbl.getName())) {
      locations = rl.getAllRegionLocations();
    }

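    // Walk every region location and act only on the replica whose start key, end key and
    // replica id all match.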
    for (HRegionLocation location : locations) {
      RegionInfo hri = location.getRegionInfo();
      ServerName hsa = location.getServerName();
      if (Bytes.compareTo(hri.getStartKey(), startKey) == 0
          && Bytes.compareTo(hri.getEndKey(), endKey) == 0
          && hri.getReplicaId() == replicaId) {

        LOG.info("RegionName: " + hri.getRegionNameAsString());
        byte[] deleteRow = hri.getRegionName();

        if (unassign) {
          LOG.info("Undeploying region " + hri + " from server " + hsa);
          undeployRegion(connection, hsa, hri);
        }

        if (regionInfoOnly) {
          LOG.info("deleting hdfs .regioninfo data: " + hri.toString() + hsa.toString());
          Path rootDir = FSUtils.getRootDir(conf);
          FileSystem fs = rootDir.getFileSystem(conf);
          Path p = new Path(FSUtils.getTableDir(rootDir, htd.getTableName()),
              hri.getEncodedName());
          Path hriPath = new Path(p, HRegionFileSystem.REGION_INFO_FILE);
          fs.delete(hriPath, true);
        }

        if (hdfs) {
          LOG.info("deleting hdfs data: " + hri.toString() + hsa.toString());
          Path rootDir = FSUtils.getRootDir(conf);
          FileSystem fs = rootDir.getFileSystem(conf);
          Path p = new Path(FSUtils.getTableDir(rootDir, htd.getTableName()),
              hri.getEncodedName());
          HBaseFsck.debugLsr(conf, p);
          boolean success = fs.delete(p, true);
          LOG.info("Deleted " + p + " successfully? " + success);
          HBaseFsck.debugLsr(conf, p);
        }

        if (metaRow) {
          try (Table meta = connection.getTable(TableName.META_TABLE_NAME, tableExecutorService)) {
            Delete delete = new Delete(deleteRow);
            meta.delete(delete);
          }
        }
      }
      LOG.info(hri.toString() + hsa.toString());
    }

    TEST_UTIL.getMetaTableRows(htd.getTableName());
    LOG.info("*** After delete:");
    dumpMeta(htd.getTableName());
  }

  /**
   * Setup a clean table before we start mucking with it.
   *
   * It will set tbl, which needs to be closed after the test.
   *
   * @throws IOException
   * @throws InterruptedException
   * @throws KeeperException
   */
  void setupTable(TableName tablename) throws Exception {
    setupTableWithRegionReplica(tablename, 1);
  }

  /**
   * Setup a clean table with a certain region replica count.
   *
   * It will set tbl, which needs to be closed after the test.
   *
   * @throws Exception
   */
  void setupTableWithRegionReplica(TableName tablename, int replicaCount) throws Exception {
    HTableDescriptor desc = new HTableDescriptor(tablename);
    desc.setRegionReplication(replicaCount);
    HColumnDescriptor hcd = new HColumnDescriptor(Bytes.toString(FAM));
    desc.addFamily(hcd); // If a table has no CFs it doesn't get checked
    createTable(TEST_UTIL, desc, SPLITS);

    tbl = connection.getTable(tablename, tableExecutorService);
    List<Put> puts = new ArrayList<>(ROWKEYS.length);
    for (byte[] row : ROWKEYS) {
      Put p = new Put(row);
      p.addColumn(FAM, Bytes.toBytes("val"), row);
      puts.add(p);
    }
    tbl.put(puts);
  }

  /**
   * Setup a clean table with a mob-enabled column.
   *
   * @param tablename The name of a table to be created.
   * @throws Exception
   */
  void setupMobTable(TableName tablename) throws Exception {
    HTableDescriptor desc = new HTableDescriptor(tablename);
    HColumnDescriptor hcd = new HColumnDescriptor(Bytes.toString(FAM));
    hcd.setMobEnabled(true);
    hcd.setMobThreshold(0);
    desc.addFamily(hcd); // If a table has no CFs it doesn't get checked
    createTable(TEST_UTIL, desc, SPLITS);

    tbl = connection.getTable(tablename, tableExecutorService);
    List<Put> puts = new ArrayList<>(ROWKEYS.length);
    for (byte[] row : ROWKEYS) {
      Put p = new Put(row);
      p.addColumn(FAM, Bytes.toBytes("val"), row);
      puts.add(p);
    }
    tbl.put(puts);
  }

  /**
   * Counts the number of rows to verify data loss or non-dataloss.
   */
  int countRows() throws IOException {
    return TEST_UTIL.countRows(tbl);
  }

  /**
   * Counts the number of rows in the given key range to verify data loss or non-dataloss.
   */
  int countRows(byte[] start, byte[] end) throws IOException {
    return TEST_UTIL.countRows(tbl, new Scan(start, end));
  }

  /**
   * Delete the table in preparation for the next test.
   *
   * @param tablename
   * @throws IOException
   */
  void cleanupTable(TableName tablename) throws Exception {
    if (tbl != null) {
      tbl.close();
      tbl = null;
    }

    connection.clearRegionCache();
    deleteTable(TEST_UTIL, tablename);
  }

  /**
   * Get region info from local cluster.
   */
  Map<ServerName, List<String>> getDeployedHRIs(final Admin admin) throws IOException {
    ClusterMetrics status = admin.getClusterMetrics(EnumSet.of(Option.LIVE_SERVERS));
    Collection<ServerName> regionServers = status.getLiveServerMetrics().keySet();
    Map<ServerName, List<String>> mm = new HashMap<>();
    for (ServerName hsi : regionServers) {
      AdminProtos.AdminService.BlockingInterface server = connection.getAdmin(hsi);

      // list all online regions from this region server
      List<RegionInfo> regions = ProtobufUtil.getOnlineRegions(server);
      List<String> regionNames = new ArrayList<>(regions.size());
      for (RegionInfo hri : regions) {
        regionNames.add(hri.getRegionNameAsString());
      }
      mm.put(hsi, regionNames);
    }
    return mm;
  }

  /**
   * Returns the HSI a region info is on.
   */
  ServerName findDeployedHSI(Map<ServerName, List<String>> mm, RegionInfo hri) {
    for (Map.Entry<ServerName, List<String>> e : mm.entrySet()) {
      if (e.getValue().contains(hri.getRegionNameAsString())) {
        return e.getKey();
      }
    }
    return null;
  }

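  /**
   * Removes the table's directory from HDFS, logging its contents before deletion.
   */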
  public void deleteTableDir(TableName table) throws IOException {
    Path rootDir = FSUtils.getRootDir(conf);
    FileSystem fs = rootDir.getFileSystem(conf);
    Path p = FSUtils.getTableDir(rootDir, table);
    HBaseFsck.debugLsr(conf, p);
    boolean success = fs.delete(p, true);
    LOG.info("Deleted " + p + " successfully? " + success);
  }

  /**
   * We don't have an easy way to verify that a flush completed, so we loop until we find a
   * legitimate hfile and return it.
   * @param fs
   * @param table
   * @return Path of a flushed hfile.
   * @throws IOException
   */
  Path getFlushedHFile(FileSystem fs, TableName table) throws IOException {
    Path tableDir = FSUtils.getTableDir(FSUtils.getRootDir(conf), table);
    Path regionDir = FSUtils.getRegionDirs(fs, tableDir).get(0);
    Path famDir = new Path(regionDir, FAM_STR);

    // keep doing this until we get a legit hfile
    while (true) {
      FileStatus[] hfFss = fs.listStatus(famDir);
      if (hfFss.length == 0) {
        continue;
      }
      for (FileStatus hfs : hfFss) {
        if (!hfs.isDirectory()) {
          return hfs.getPath();
        }
      }
    }
  }

  /**
   * Gets a flushed mob file.
   * @param fs The current file system.
   * @param table The current table name.
   * @return Path of a flushed mob file.
   * @throws IOException
   */
  Path getFlushedMobFile(FileSystem fs, TableName table) throws IOException {
    Path famDir = MobUtils.getMobFamilyPath(conf, table, FAM_STR);

    // keep doing this until we get a legit hfile
    while (true) {
      FileStatus[] hfFss = fs.listStatus(famDir);
      if (hfFss.length == 0) {
        continue;
      }
      for (FileStatus hfs : hfFss) {
        if (!hfs.isDirectory()) {
          return hfs.getPath();
        }
      }
    }
  }

  /**
   * Creates a new mob file name from the old one, keeping its start key and date but generating
   * a fresh UUID suffix.
   * @param oldFileName The old mob file name.
   * @return The new mob file name.
   */
  String createMobFileName(String oldFileName) {
    MobFileName mobFileName = MobFileName.create(oldFileName);
    String startKey = mobFileName.getStartKey();
    String date = mobFileName.getDate();
    return MobFileName.create(startKey, date,
                              TEST_UTIL.getRandomUUID().toString().replaceAll("-", ""))
      .getFileName();
  }

  /**
   * Tests that use this should have a timeout, because this method could potentially wait
   * forever.
   */
  protected void doQuarantineTest(TableName table, HBaseFsck hbck, int check,
                                  int corrupt, int fail, int quar, int missing) throws Exception {
    try {
      setupTable(table);
      assertEquals(ROWKEYS.length, countRows());
      admin.flush(table); // flush is async.

      // Take the table offline before running the HFile corruption checker against it.
      admin.disableTable(table);

      String[] args = {"-sidelineCorruptHFiles", "-repairHoles", "-ignorePreCheckPermission",
          table.getNameAsString()};
      HBaseFsck res = hbck.exec(hbfsckExecutorService, args);

      HFileCorruptionChecker hfcc = res.getHFilecorruptionChecker();
      assertEquals(check, hfcc.getHFilesChecked());
      assertEquals(corrupt, hfcc.getCorrupted().size());
      assertEquals(fail, hfcc.getFailures().size());
      assertEquals(quar, hfcc.getQuarantined().size());
      assertEquals(missing, hfcc.getMissing().size());

      // it's been fixed, verify that we can enable
      admin.enableTableAsync(table);
      while (!admin.isTableEnabled(table)) {
        try {
          Thread.sleep(250);
        } catch (InterruptedException e) {
          e.printStackTrace();
          fail("Interrupted when trying to enable table " + table);
        }
      }
    } finally {
      cleanupTable(table);
    }
  }

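  /**
   * ErrorReporter stub that simply counts every callback, letting tests verify that hbck
   * exercised the reporting interface.
   */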
  static class MockErrorReporter implements ErrorReporter {
    static int calledCount = 0;

    @Override
    public void clear() {
      calledCount++;
    }

    @Override
    public void report(String message) {
      calledCount++;
    }

    @Override
    public void reportError(String message) {
      calledCount++;
    }

    @Override
    public void reportError(ERROR_CODE errorCode, String message) {
      calledCount++;
    }

    @Override
    public void reportError(ERROR_CODE errorCode, String message, TableInfo table) {
      calledCount++;
    }

    @Override
    public void reportError(ERROR_CODE errorCode,
        String message, TableInfo table, HbckInfo info) {
      calledCount++;
    }

    @Override
    public void reportError(ERROR_CODE errorCode, String message,
        TableInfo table, HbckInfo info1, HbckInfo info2) {
      calledCount++;
    }

    @Override
    public int summarize() {
      return ++calledCount;
    }

    @Override
    public void detail(String details) {
      calledCount++;
    }

    @Override
    public ArrayList<ERROR_CODE> getErrorList() {
      calledCount++;
      return new ArrayList<>();
    }

    @Override
    public void progress() {
      calledCount++;
    }

    @Override
    public void print(String message) {
      calledCount++;
    }

    @Override
    public void resetErrors() {
      calledCount++;
    }

    @Override
    public boolean tableHasErrors(TableInfo table) {
      calledCount++;
      return false;
    }
  }

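  /**
   * Damages the hbase:meta region in the requested ways: optionally unassigns it, deletes its
   * .regioninfo file, and/or removes its directory from HDFS.
   */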
  protected void deleteMetaRegion(Configuration conf, boolean unassign, boolean hdfs,
                                  boolean regionInfoOnly) throws IOException, InterruptedException {
    HRegionLocation metaLocation = connection.getRegionLocator(TableName.META_TABLE_NAME)
        .getRegionLocation(HConstants.EMPTY_START_ROW);
    ServerName hsa = metaLocation.getServerName();
    RegionInfo hri = metaLocation.getRegionInfo();
    if (unassign) {
      LOG.info("Undeploying meta region " + hri + " from server " + hsa);
      try (Connection unmanagedConnection = ConnectionFactory.createConnection(conf)) {
        undeployRegion(unmanagedConnection, hsa, hri);
      }
    }

    if (regionInfoOnly) {
      LOG.info("deleting hdfs .regioninfo data: " + hri.toString() + hsa.toString());
      Path rootDir = FSUtils.getRootDir(conf);
      FileSystem fs = rootDir.getFileSystem(conf);
      Path p = new Path(rootDir + "/" + TableName.META_TABLE_NAME.getNameAsString(),
          hri.getEncodedName());
      Path hriPath = new Path(p, HRegionFileSystem.REGION_INFO_FILE);
      fs.delete(hriPath, true);
    }

    if (hdfs) {
      LOG.info("deleting hdfs data: " + hri.toString() + hsa.toString());
      Path rootDir = FSUtils.getRootDir(conf);
      FileSystem fs = rootDir.getFileSystem(conf);
      Path p = new Path(rootDir + "/" + TableName.META_TABLE_NAME.getNameAsString(),
          hri.getEncodedName());
      HBaseFsck.debugLsr(conf, p);
      boolean success = fs.delete(p, true);
      LOG.info("Deleted " + p + " successfully? " + success);
      HBaseFsck.debugLsr(conf, p);
    }
  }

  @org.junit.Rule
  public TestName name = new TestName();

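  /**
   * Master coprocessor that counts down latches once create/delete table actions complete, so
   * tests can wait for these asynchronous admin operations to finish.
   */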
  public static class MasterSyncCoprocessor implements MasterCoprocessor, MasterObserver {
    volatile CountDownLatch tableCreationLatch = null;
    volatile CountDownLatch tableDeletionLatch = null;

    @Override
    public Optional<MasterObserver> getMasterObserver() {
      return Optional.of(this);
    }

    @Override
    public void postCompletedCreateTableAction(
        final ObserverContext<MasterCoprocessorEnvironment> ctx,
        final TableDescriptor desc,
        final RegionInfo[] regions) throws IOException {
      // the AccessController tests sometimes call postCompletedCreateTableAction() directly,
      // so the latch may not have been set
      if (tableCreationLatch != null) {
        tableCreationLatch.countDown();
      }
    }

    @Override
    public void postCompletedDeleteTableAction(
        final ObserverContext<MasterCoprocessorEnvironment> ctx,
        final TableName tableName) throws IOException {
      // the AccessController tests sometimes call postCompletedDeleteTableAction() directly,
      // so the latch may not have been set
      if (tableDeletionLatch != null) {
        tableDeletionLatch.countDown();
      }
    }
  }

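  /**
   * Creates the table and blocks until the master coprocessor reports that creation has
   * completed and all regions are assigned.
   */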
  public static void createTable(HBaseTestingUtility testUtil, HTableDescriptor htd,
    byte[][] splitKeys) throws Exception {
    // NOTE: We need a latch because the admin API is asynchronous, so the postOp coprocessor
    // method may be called after the admin operation has returned.
    MasterSyncCoprocessor coproc = testUtil.getHBaseCluster().getMaster()
        .getMasterCoprocessorHost().findCoprocessor(MasterSyncCoprocessor.class);
    coproc.tableCreationLatch = new CountDownLatch(1);
    if (splitKeys != null) {
      admin.createTable(htd, splitKeys);
    } else {
      admin.createTable(htd);
    }
    coproc.tableCreationLatch.await();
    coproc.tableCreationLatch = null;
    testUtil.waitUntilAllRegionsAssigned(htd.getTableName());
  }

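  /**
   * Disables the table if needed and then deletes it, blocking until the master coprocessor
   * reports that the delete action has completed.
   */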
  public static void deleteTable(HBaseTestingUtility testUtil, TableName tableName)
    throws Exception {
    // NOTE: We need a latch because the admin API is asynchronous, so the postOp coprocessor
    // method may be called after the admin operation has returned.
    MasterSyncCoprocessor coproc = testUtil.getHBaseCluster().getMaster()
      .getMasterCoprocessorHost().findCoprocessor(MasterSyncCoprocessor.class);
    coproc.tableDeletionLatch = new CountDownLatch(1);
    try {
      admin.disableTable(tableName);
    } catch (Exception e) {
      LOG.debug("Table: " + tableName + " already disabled, so just deleting it.");
    }
    admin.deleteTable(tableName);
    coproc.tableDeletionLatch.await();
    coproc.tableDeletionLatch = null;
  }
}