/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase;

import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;

import edu.umd.cs.findbugs.annotations.Nullable;
import java.io.File;
import java.io.IOException;
import java.io.OutputStream;
import java.lang.reflect.Field;
import java.lang.reflect.Modifier;
import java.net.BindException;
import java.net.DatagramSocket;
import java.net.InetAddress;
import java.net.ServerSocket;
import java.net.Socket;
import java.net.UnknownHostException;
import java.nio.charset.StandardCharsets;
import java.security.MessageDigest;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collection;
import java.util.Collections;
import java.util.EnumSet;
import java.util.HashSet;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.NavigableSet;
import java.util.Properties;
import java.util.Random;
import java.util.Set;
import java.util.TreeSet;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicReference;
import java.util.stream.Collectors;
import org.apache.commons.io.FileUtils;
import org.apache.commons.lang3.RandomStringUtils;
import org.apache.commons.logging.impl.Jdk14Logger;
import org.apache.commons.logging.impl.Log4JLogger;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.ClusterMetrics.Option;
import org.apache.hadoop.hbase.Waiter.ExplainingPredicate;
import org.apache.hadoop.hbase.Waiter.Predicate;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.BufferedMutator;
import org.apache.hadoop.hbase.client.ClusterConnection;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Consistency;
import org.apache.hadoop.hbase.client.Delete;
import org.apache.hadoop.hbase.client.Durability;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.HBaseAdmin;
import org.apache.hadoop.hbase.client.Hbck;
import org.apache.hadoop.hbase.client.ImmutableHRegionInfo;
import org.apache.hadoop.hbase.client.ImmutableHTableDescriptor;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.RegionInfo;
import org.apache.hadoop.hbase.client.RegionInfoBuilder;
import org.apache.hadoop.hbase.client.RegionLocator;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableState;
import org.apache.hadoop.hbase.fs.HFileSystem;
import org.apache.hadoop.hbase.io.compress.Compression;
import org.apache.hadoop.hbase.io.compress.Compression.Algorithm;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
import org.apache.hadoop.hbase.io.hfile.ChecksumUtil;
import org.apache.hadoop.hbase.io.hfile.HFile;
import org.apache.hadoop.hbase.ipc.RpcServerInterface;
import org.apache.hadoop.hbase.ipc.ServerNotRunningYetException;
import org.apache.hadoop.hbase.mapreduce.MapreduceTestingShim;
import org.apache.hadoop.hbase.master.HMaster;
import org.apache.hadoop.hbase.master.RegionState;
import org.apache.hadoop.hbase.master.ServerManager;
import org.apache.hadoop.hbase.master.assignment.AssignmentManager;
import org.apache.hadoop.hbase.master.assignment.AssignmentTestingUtil;
import org.apache.hadoop.hbase.master.assignment.RegionStateStore;
import org.apache.hadoop.hbase.master.assignment.RegionStates;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.regionserver.ChunkCreator;
import org.apache.hadoop.hbase.regionserver.HRegion;
import org.apache.hadoop.hbase.regionserver.HRegionServer;
import org.apache.hadoop.hbase.regionserver.HStore;
import org.apache.hadoop.hbase.regionserver.InternalScanner;
import org.apache.hadoop.hbase.regionserver.MemStoreLABImpl;
import org.apache.hadoop.hbase.regionserver.Region;
import org.apache.hadoop.hbase.regionserver.RegionScanner;
import org.apache.hadoop.hbase.regionserver.RegionServerServices;
import org.apache.hadoop.hbase.regionserver.RegionServerStoppedException;
import org.apache.hadoop.hbase.security.HBaseKerberosUtils;
import org.apache.hadoop.hbase.security.User;
import org.apache.hadoop.hbase.security.visibility.VisibilityLabelsCache;
import org.apache.hadoop.hbase.trace.TraceUtil;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.CommonFSUtils;
import org.apache.hadoop.hbase.util.FSTableDescriptors;
import org.apache.hadoop.hbase.util.FSUtils;
import org.apache.hadoop.hbase.util.JVMClusterUtil;
import org.apache.hadoop.hbase.util.JVMClusterUtil.MasterThread;
import org.apache.hadoop.hbase.util.JVMClusterUtil.RegionServerThread;
import org.apache.hadoop.hbase.util.Pair;
import org.apache.hadoop.hbase.util.RegionSplitter;
import org.apache.hadoop.hbase.util.RegionSplitter.SplitAlgorithm;
import org.apache.hadoop.hbase.util.RetryCounter;
import org.apache.hadoop.hbase.util.Threads;
import org.apache.hadoop.hbase.wal.WAL;
import org.apache.hadoop.hbase.wal.WALFactory;
import org.apache.hadoop.hbase.zookeeper.EmptyWatcher;
import org.apache.hadoop.hbase.zookeeper.ZKConfig;
import org.apache.hadoop.hbase.zookeeper.ZKWatcher;
import org.apache.hadoop.hdfs.DFSClient;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.server.namenode.EditLogFileOutputStream;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.MiniMRCluster;
import org.apache.hadoop.mapred.TaskLog;
import org.apache.hadoop.minikdc.MiniKdc;
import org.apache.log4j.LogManager;
import org.apache.yetus.audience.InterfaceAudience;
import org.apache.zookeeper.WatchedEvent;
import org.apache.zookeeper.ZooKeeper;
import org.apache.zookeeper.ZooKeeper.States;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.slf4j.impl.Log4jLoggerAdapter;

import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;

/**
 * Facility for testing HBase. Replacement for
 * old HBaseTestCase and HBaseClusterTestCase functionality.
 * Create an instance and keep it around while testing HBase.  This class is
 * meant to be your one-stop shop for anything you might need while testing.  Manages
 * one cluster at a time only. The managed cluster can be an in-process
 * {@link MiniHBaseCluster}, or a deployed cluster of type {@code DistributedHBaseCluster}.
 * Not all methods work with the real cluster.
 * Depends on log4j being on the classpath and
 * hbase-site.xml for logging and test-run configuration.  It does not set
 * logging levels.
 * In the configuration properties, default values for master-info-port and
 * region-server-port are overridden such that a random port will be assigned (thus
 * avoiding port contention if another local HBase instance is already running).
 * <p>To preserve test data directories, set the system property
 * "hbase.testing.preserve.testdir" to true.
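 *
 * <p>A minimal usage sketch (illustrative only; the table name and family are
 * arbitrary, and error handling is omitted):
 * <pre>
 *   HBaseTestingUtility util = new HBaseTestingUtility();
 *   util.startMiniCluster();
 *   Table table = util.createTable(TableName.valueOf("test"), "fam");
 *   // ... exercise the cluster through 'table' ...
 *   util.shutdownMiniCluster();
 * </pre>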
 */
@InterfaceAudience.Public
@SuppressWarnings("deprecation")
public class HBaseTestingUtility extends HBaseZKTestingUtility {

  /**
   * System property key to get test directory value. The name is what it is because minidfs
   * is hard-coded to put test data here. It should NOT be used directly in HBase, as it's
   * a property used by minidfs.
   * @deprecated since 2.0.0 and will be removed in 3.0.0. Can be used only with mini dfs.
   * @see <a href="https://issues.apache.org/jira/browse/HBASE-19410">HBASE-19410</a>
   */
  @Deprecated
  private static final String TEST_DIRECTORY_KEY = "test.build.data";

  public static final String REGIONS_PER_SERVER_KEY = "hbase.test.regions-per-server";
  /**
   * The default number of regions per regionserver when creating a pre-split
   * table.
   */
  public static final int DEFAULT_REGIONS_PER_SERVER = 3;


  public static final String PRESPLIT_TEST_TABLE_KEY = "hbase.test.pre-split-table";
  public static final boolean PRESPLIT_TEST_TABLE = true;

  private MiniDFSCluster dfsCluster = null;

  private volatile HBaseCluster hbaseCluster = null;
  private MiniMRCluster mrCluster = null;

  /** If there is a mini cluster running for this testing utility instance. */
  private volatile boolean miniClusterRunning;

  private String hadoopLogDir;

  /**
   * Directory on test filesystem where we put the data for this instance of
   * HBaseTestingUtility.
   */
  private Path dataTestDirOnTestFS = null;

  /**
   * Shared cluster connection.
   */
  private volatile Connection connection;

  /** Filesystem URI used for map-reduce mini-cluster setup */
  private static String FS_URI;

  /** This is for unit tests parameterized with combinations of the memstore TS and tags flags. */
  public static final List<Object[]> MEMSTORETS_TAGS_PARAMETRIZED = memStoreTSAndTagsCombination();

  /**
   * Checks to see if a specific port is available.
   *
   * @param port the port number to check for availability
   * @return <tt>true</tt> if the port is available, or <tt>false</tt> if not
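   *
   * <p>Example (sketch; the starting port is arbitrary):
   * <pre>
   *   int port = 50000;
   *   while (!HBaseTestingUtility.available(port)) {
   *     port++;
   *   }
   * </pre>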
   */
  public static boolean available(int port) {
    ServerSocket ss = null;
    DatagramSocket ds = null;
    try {
      ss = new ServerSocket(port);
      ss.setReuseAddress(true);
      ds = new DatagramSocket(port);
      ds.setReuseAddress(true);
      return true;
    } catch (IOException e) {
      // Do nothing
    } finally {
      if (ds != null) {
        ds.close();
      }

      if (ss != null) {
        try {
          ss.close();
        } catch (IOException e) {
          /* should not be thrown */
        }
      }
    }

    return false;
  }

  /**
   * Create all combinations of Bloom filters and compression algorithms for
   * testing.
   */
  private static List<Object[]> bloomAndCompressionCombinations() {
    List<Object[]> configurations = new ArrayList<>();
    for (Compression.Algorithm comprAlgo :
         HBaseCommonTestingUtility.COMPRESSION_ALGORITHMS) {
      for (BloomType bloomType : BloomType.values()) {
        configurations.add(new Object[] { comprAlgo, bloomType });
      }
    }
    return Collections.unmodifiableList(configurations);
  }

  /**
   * Create all combinations of the memstore TS and tags flags.
   */
  private static List<Object[]> memStoreTSAndTagsCombination() {
    List<Object[]> configurations = new ArrayList<>();
    configurations.add(new Object[] { false, false });
    configurations.add(new Object[] { false, true });
    configurations.add(new Object[] { true, false });
    configurations.add(new Object[] { true, true });
    return Collections.unmodifiableList(configurations);
  }
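
  /*
   * Sketch of how the combination lists above are typically consumed by a JUnit
   * parameterized test (the test class and its constructor are hypothetical):
   *
   *   @RunWith(Parameterized.class)
   *   public class TestSomething {
   *     @Parameterized.Parameters
   *     public static List<Object[]> params() {
   *       return HBaseTestingUtility.MEMSTORETS_TAGS_PARAMETRIZED;
   *     }
   *     public TestSomething(boolean useMemstoreTS, boolean useTags) { ... }
   *   }
   */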

  /**
   * Create all combinations of the memstore TS, tags and offheap memstore flags.
   */
  public static List<Object[]> memStoreTSTagsAndOffheapCombination() {
    List<Object[]> configurations = new ArrayList<>();
    configurations.add(new Object[] { false, false, true });
    configurations.add(new Object[] { false, false, false });
    configurations.add(new Object[] { false, true, true });
    configurations.add(new Object[] { false, true, false });
    configurations.add(new Object[] { true, false, true });
    configurations.add(new Object[] { true, false, false });
    configurations.add(new Object[] { true, true, true });
    configurations.add(new Object[] { true, true, false });
    return Collections.unmodifiableList(configurations);
  }

  public static final Collection<Object[]> BLOOM_AND_COMPRESSION_COMBINATIONS =
      bloomAndCompressionCombinations();


  /**
   * <p>Create an HBaseTestingUtility using a default configuration.
   *
   * <p>Initially, all tmp files are written to a local test data directory.
   * Once {@link #startMiniDFSCluster} is called, either directly or via
   * {@link #startMiniCluster()}, tmp data will be written to the DFS directory instead.
   *
   * <p>Previously, there was a distinction between the type of utility returned by
   * {@link #createLocalHTU()} and this constructor; this is no longer the case. All
   * HBaseTestingUtility objects will behave as local until a DFS cluster is started,
   * at which point they will switch to using mini DFS for storage.
   */
  public HBaseTestingUtility() {
    this(HBaseConfiguration.create());
  }

  /**
   * <p>Create an HBaseTestingUtility using a given configuration.
   *
   * <p>Initially, all tmp files are written to a local test data directory.
   * Once {@link #startMiniDFSCluster} is called, either directly or via
   * {@link #startMiniCluster()}, tmp data will be written to the DFS directory instead.
   *
   * <p>Previously, there was a distinction between the type of utility returned by
   * {@link #createLocalHTU()} and this constructor; this is no longer the case. All
   * HBaseTestingUtility objects will behave as local until a DFS cluster is started,
   * at which point they will switch to using mini DFS for storage.
   *
   * @param conf The configuration to use for further operations
   */
  public HBaseTestingUtility(@Nullable Configuration conf) {
    super(conf);

    // an HBase checksum verification failure will cause unit tests to fail
    ChecksumUtil.generateExceptionForChecksumFailureForTest(true);

    // Save this for when setting default file:// breaks things
    if (this.conf.get("fs.defaultFS") != null) {
      this.conf.set("original.defaultFS", this.conf.get("fs.defaultFS"));
    }
    if (this.conf.get(HConstants.HBASE_DIR) != null) {
      this.conf.set("original.hbase.dir", this.conf.get(HConstants.HBASE_DIR));
    }
    // Every cluster is a local cluster until we start DFS
    // Note that conf could be null, but this.conf will not be
    String dataTestDir = getDataTestDir().toString();
    this.conf.set("fs.defaultFS", "file:///");
    this.conf.set(HConstants.HBASE_DIR, "file://" + dataTestDir);
    LOG.debug("Setting {} to {}", HConstants.HBASE_DIR, dataTestDir);
    this.conf.setBoolean(CommonFSUtils.UNSAFE_STREAM_CAPABILITY_ENFORCE, false);
    // If the value for random ports isn't set, set it to true, thus making
    // random port assignment the default that tests must explicitly opt out of
    this.conf.setBoolean(LocalHBaseCluster.ASSIGN_RANDOM_PORTS,
        this.conf.getBoolean(LocalHBaseCluster.ASSIGN_RANDOM_PORTS, true));
  }

  /**
   * @deprecated since 2.0.0 and will be removed in 3.0.0. Use {@link #HBaseTestingUtility()}
   *   instead.
   * @return a normal HBaseTestingUtility
   * @see #HBaseTestingUtility()
   * @see <a href="https://issues.apache.org/jira/browse/HBASE-19841">HBASE-19841</a>
   */
  @Deprecated
  public static HBaseTestingUtility createLocalHTU() {
    return new HBaseTestingUtility();
  }

  /**
   * @deprecated since 2.0.0 and will be removed in 3.0.0. Use
   *   {@link #HBaseTestingUtility(Configuration)} instead.
   * @return a normal HBaseTestingUtility
   * @see #HBaseTestingUtility(Configuration)
   * @see <a href="https://issues.apache.org/jira/browse/HBASE-19841">HBASE-19841</a>
   */
  @Deprecated
  public static HBaseTestingUtility createLocalHTU(Configuration c) {
    return new HBaseTestingUtility(c);
  }

  /**
   * Close both the region {@code r} and its underlying WAL. For use in tests.
   */
  public static void closeRegionAndWAL(final Region r) throws IOException {
    closeRegionAndWAL((HRegion)r);
  }

  /**
   * Close both the HRegion {@code r} and its underlying WAL. For use in tests.
   */
  public static void closeRegionAndWAL(final HRegion r) throws IOException {
    if (r == null) return;
    r.close();
    if (r.getWAL() == null) return;
    r.getWAL().close();
  }

  /**
   * Returns this class's instance of {@link Configuration}.  Be careful how
   * you use the returned Configuration since {@link Connection} instances
   * can be shared.  The Map of Connections is keyed by the Configuration.  If
   * say, a Connection was being used against a cluster that had been shutdown,
   * see {@link #shutdownMiniCluster()}, then the Connection will no longer
   * be wholesome.  Rather than use the returned Configuration directly, it's
   * usually best to make a copy and use that.  Do
   * <code>Configuration c = new Configuration(INSTANCE.getConfiguration());</code>
   * @return Instance of Configuration.
   */
  @Override
  public Configuration getConfiguration() {
    return super.getConfiguration();
  }

  public void setHBaseCluster(HBaseCluster hbaseCluster) {
    this.hbaseCluster = hbaseCluster;
  }

  /**
   * Home our data in a dir under {@link #DEFAULT_BASE_TEST_DIRECTORY}.
   * Give it a random name so we can have many concurrent tests running if
   * we need to.  It needs to amend the {@link #TEST_DIRECTORY_KEY}
   * System property, as it's what minidfscluster bases
   * its data dir on.  Modifying a System property is not the way to do concurrent
   * instances -- another instance could grab the temporary
   * value unintentionally -- but nothing can be done about it at the moment;
   * the minidfscluster supports a single instance only.
   *
   * We also create the underlying directories for
   *  hadoop.log.dir, mapreduce.cluster.local.dir and hadoop.tmp.dir, and set the values
   *  in the conf, and as a system property for hadoop.tmp.dir
   *
   * @return The calculated data test build directory, if newly-created.
   */
  @Override
  protected Path setupDataTestDir() {
    Path testPath = super.setupDataTestDir();
    if (null == testPath) {
      return null;
    }

    createSubDirAndSystemProperty(
      "hadoop.log.dir",
      testPath, "hadoop-log-dir");

    // This is defaulted in core-default.xml to /tmp/hadoop-${user.name}, but
    //  we want our own value to ensure uniqueness on the same machine
    createSubDirAndSystemProperty(
      "hadoop.tmp.dir",
      testPath, "hadoop-tmp-dir");

    // Read and modified in org.apache.hadoop.mapred.MiniMRCluster
    createSubDir(
      "mapreduce.cluster.local.dir",
      testPath, "mapred-local-dir");

    return testPath;
  }

  private void createSubDirAndSystemProperty(
    String propertyName, Path parent, String subDirName){

    String sysValue = System.getProperty(propertyName);

    if (sysValue != null) {
      // There is already a value set. So we do nothing but hope
      //  that there will be no conflicts
      LOG.info("System.getProperty(\"" + propertyName + "\") already set to: " +
        sysValue + " so I do NOT create it in " + parent);
      String confValue = conf.get(propertyName);
      if (confValue != null && !confValue.endsWith(sysValue)){
        LOG.warn(
          propertyName + " property value differs in configuration and system: " +
          "Configuration=" + confValue + " while System=" + sysValue +
          " Overriding the configuration value with the system value.");
      }
      conf.set(propertyName, sysValue);
    } else {
      // Ok, it's not set, so we create it as a subdirectory
      createSubDir(propertyName, parent, subDirName);
      System.setProperty(propertyName, conf.get(propertyName));
    }
  }

  /**
   * @return Where to write test data on the test filesystem; a "test-data"
   * directory under the test filesystem's working directory
   * @see #setupDataTestDirOnTestFS()
   * @see #getTestFileSystem()
   */
  private Path getBaseTestDirOnTestFS() throws IOException {
    FileSystem fs = getTestFileSystem();
    return new Path(fs.getWorkingDirectory(), "test-data");
  }

  /**
   * @return META table descriptor
   * @deprecated since 2.0.0 and will be removed in 3.0.0. Use
   *             {@link #getMetaTableDescriptorBuilder()} instead.
   */
  @Deprecated
  public HTableDescriptor getMetaTableDescriptor() {
    return new ImmutableHTableDescriptor(getMetaTableDescriptorBuilder().build());
  }

  /**
   * @return META table descriptor builder
   */
  public TableDescriptorBuilder getMetaTableDescriptorBuilder() {
    try {
      return FSTableDescriptors.createMetaTableDescriptorBuilder(conf);
    } catch (IOException e) {
      throw new RuntimeException("Unable to create META table descriptor", e);
    }
  }

  /**
   * Returns a Path in the test filesystem, obtained from {@link #getTestFileSystem()}
   * to write temporary test data. Call this method after setting up the mini dfs cluster
   * if the test relies on it.
   * @return a unique path in the test filesystem
   */
  public Path getDataTestDirOnTestFS() throws IOException {
    if (dataTestDirOnTestFS == null) {
      setupDataTestDirOnTestFS();
    }

    return dataTestDirOnTestFS;
  }

  /**
   * Returns a Path in the test filesystem, obtained from {@link #getTestFileSystem()}
   * to write temporary test data. Call this method after setting up the mini dfs cluster
   * if the test relies on it.
   * @return a unique path in the test filesystem
   * @param subdirName name of the subdir to create under the base test dir
   */
  public Path getDataTestDirOnTestFS(final String subdirName) throws IOException {
    return new Path(getDataTestDirOnTestFS(), subdirName);
  }

  /**
   * Sets up a path in the test filesystem to be used by tests.
   * Creates a new directory if not already set up.
   */
  private void setupDataTestDirOnTestFS() throws IOException {
    if (dataTestDirOnTestFS != null) {
      LOG.warn("Data test dir on test fs already set up: "
          + dataTestDirOnTestFS.toString());
      return;
    }
    dataTestDirOnTestFS = getNewDataTestDirOnTestFS();
  }

  /**
   * Sets up a new path in the test filesystem to be used by tests.
   */
  private Path getNewDataTestDirOnTestFS() throws IOException {
    // The file system can be either local, mini dfs, or if the configuration
    // is supplied externally, it can be an external cluster FS. If it is a local
    // file system, the tests should use getBaseTestDir, otherwise, we can use
    // the working directory, and create a unique sub dir there
    FileSystem fs = getTestFileSystem();
    Path newDataTestDir;
    String randomStr = getRandomUUID().toString();
    if (fs.getUri().getScheme().equals(FileSystem.getLocal(conf).getUri().getScheme())) {
      newDataTestDir = new Path(getDataTestDir(), randomStr);
      File dataTestDir = new File(newDataTestDir.toString());
      if (deleteOnExit()) {
        dataTestDir.deleteOnExit();
      }
    } else {
      Path base = getBaseTestDirOnTestFS();
      newDataTestDir = new Path(base, randomStr);
      if (deleteOnExit()) {
        fs.deleteOnExit(newDataTestDir);
      }
    }
    return newDataTestDir;
  }

  /**
   * Cleans the test data directory on the test filesystem.
   * @return True if we removed the test dirs
   * @throws IOException
   */
  public boolean cleanupDataTestDirOnTestFS() throws IOException {
    boolean ret = getTestFileSystem().delete(dataTestDirOnTestFS, true);
    if (ret) {
      dataTestDirOnTestFS = null;
    }
    return ret;
  }

  /**
   * Cleans a subdirectory under the test data directory on the test filesystem.
   * @return True if we removed the child
   * @throws IOException
   */
  public boolean cleanupDataTestDirOnTestFS(String subdirName) throws IOException {
    Path cpath = getDataTestDirOnTestFS(subdirName);
    return getTestFileSystem().delete(cpath, true);
  }

  /**
   * Start a minidfscluster.
   * @param servers How many DNs to start.
   * @throws Exception
   * @see #shutdownMiniDFSCluster()
   * @return The mini dfs cluster created.
   */
  public MiniDFSCluster startMiniDFSCluster(int servers) throws Exception {
    return startMiniDFSCluster(servers, null);
  }
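
  /*
   * Sketch: a test that only needs HDFS (no HBase) can use the mini DFS cluster
   * directly. Illustrative only; the datanode count is arbitrary:
   *
   *   HBaseTestingUtility util = new HBaseTestingUtility();
   *   MiniDFSCluster dfs = util.startMiniDFSCluster(3);
   *   FileSystem fs = dfs.getFileSystem();
   *   // ... exercise fs ...
   *   util.shutdownMiniDFSCluster();
   */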

  /**
   * Start a minidfscluster.
   * This is useful if you want to run datanodes on distinct hosts for things
   * like HDFS block location verification.
   * If you start MiniDFSCluster without host names, all instances of the
   * datanodes will have the same host name.
   * @param hosts hostnames of the DNs to run on.
   * @throws Exception
   * @see #shutdownMiniDFSCluster()
   * @return The mini dfs cluster created.
   */
  public MiniDFSCluster startMiniDFSCluster(final String[] hosts)
  throws Exception {
    if (hosts != null && hosts.length != 0) {
      return startMiniDFSCluster(hosts.length, hosts);
    } else {
      return startMiniDFSCluster(1, null);
    }
  }

  /**
   * Start a minidfscluster.
   * Can only create one.
   * @param servers How many DNs to start.
   * @param hosts hostnames of the DNs to run on.
   * @throws Exception
   * @see #shutdownMiniDFSCluster()
   * @return The mini dfs cluster created.
   */
  public MiniDFSCluster startMiniDFSCluster(int servers, final String[] hosts)
  throws Exception {
    return startMiniDFSCluster(servers, null, hosts);
  }

  private void setFs() throws IOException {
    if (this.dfsCluster == null) {
      LOG.info("Skipping setting fs because dfsCluster is null");
      return;
    }
    FileSystem fs = this.dfsCluster.getFileSystem();
    FSUtils.setFsDefault(this.conf, new Path(fs.getUri()));

    // re-enable this check with dfs
    conf.unset(CommonFSUtils.UNSAFE_STREAM_CAPABILITY_ENFORCE);
  }

  public MiniDFSCluster startMiniDFSCluster(int servers, final String[] racks, String[] hosts)
      throws Exception {
    createDirsAndSetProperties();
    EditLogFileOutputStream.setShouldSkipFsyncForTesting(true);

    // Error level to skip some warnings specific to the minicluster. See HBASE-4709
    org.apache.log4j.Logger.getLogger(org.apache.hadoop.metrics2.util.MBeans.class).
        setLevel(org.apache.log4j.Level.ERROR);
    org.apache.log4j.Logger.getLogger(org.apache.hadoop.metrics2.impl.MetricsSystemImpl.class).
        setLevel(org.apache.log4j.Level.ERROR);

    TraceUtil.initTracer(conf);

    this.dfsCluster = new MiniDFSCluster(0, this.conf, servers, true, true,
        true, null, racks, hosts, null);

    // Set this just-started cluster as our filesystem.
    setFs();

    // Wait for the cluster to be totally up
    this.dfsCluster.waitClusterUp();

    // reset the test directory for the test file system
    dataTestDirOnTestFS = null;
    String dataTestDir = getDataTestDir().toString();
    conf.set(HConstants.HBASE_DIR, dataTestDir);
    LOG.debug("Setting {} to {}", HConstants.HBASE_DIR, dataTestDir);

    return this.dfsCluster;
  }

  public MiniDFSCluster startMiniDFSClusterForTestWAL(int namenodePort) throws IOException {
    createDirsAndSetProperties();
    dfsCluster = new MiniDFSCluster(namenodePort, conf, 5, false, true, true, null,
        null, null, null);
    return dfsCluster;
  }

  /** This is used before starting HDFS and map-reduce mini-clusters */
  private void createDirsAndSetProperties() throws IOException {
    setupClusterTestDir();
    conf.set(TEST_DIRECTORY_KEY, clusterTestDir.getPath());
    System.setProperty(TEST_DIRECTORY_KEY, clusterTestDir.getPath());
    createDirAndSetProperty("cache_data", "test.cache.data");
    createDirAndSetProperty("hadoop_tmp", "hadoop.tmp.dir");
    hadoopLogDir = createDirAndSetProperty("hadoop_logs", "hadoop.log.dir");
    createDirAndSetProperty("mapred_local", "mapreduce.cluster.local.dir");
    createDirAndSetProperty("mapred_temp", "mapreduce.cluster.temp.dir");
    enableShortCircuit();

    Path root = getDataTestDirOnTestFS("hadoop");
    conf.set(MapreduceTestingShim.getMROutputDirProp(),
      new Path(root, "mapred-output-dir").toString());
    conf.set("mapreduce.jobtracker.system.dir", new Path(root, "mapred-system-dir").toString());
    conf.set("mapreduce.jobtracker.staging.root.dir",
      new Path(root, "mapreduce-jobtracker-staging-root-dir").toString());
    conf.set("mapreduce.job.working.dir", new Path(root, "mapred-working-dir").toString());
    conf.set("yarn.app.mapreduce.am.staging-dir",
      new Path(root, "mapreduce-am-staging-root-dir").toString());
  }

  /**
   *  Check whether the tests should assume NEW_VERSION_BEHAVIOR when creating
   *  new column families. Defaults to false.
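   *  Can be overridden on the command line, e.g.
   *  {@code -Dhbase.tests.new.version.behavior=true}.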
   */
  public boolean isNewVersionBehaviorEnabled() {
    final String propName = "hbase.tests.new.version.behavior";
    String v = System.getProperty(propName);
    if (v != null) {
      return Boolean.parseBoolean(v);
    }
    return false;
  }

  /**
   *  Get the HBase setting for dfs.client.read.shortcircuit from the conf or a system property.
   *  This allows specifying this parameter on the command line.
   *  If not set, the default is false.
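   *  For example, {@code -Dhbase.tests.use.shortcircuit.reads=true} turns
   *  short-circuit reads on for a test run.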
   */
  public boolean isReadShortCircuitOn() {
    final String propName = "hbase.tests.use.shortcircuit.reads";
    String readOnProp = System.getProperty(propName);
    if (readOnProp != null) {
      return Boolean.parseBoolean(readOnProp);
    } else {
      return conf.getBoolean(propName, false);
    }
  }

  /** Enable the short circuit read, unless configured differently.
   * Set both HBase and HDFS settings, including skipping the hdfs checksum checks.
   */
  private void enableShortCircuit() {
    if (isReadShortCircuitOn()) {
      String curUser = System.getProperty("user.name");
      LOG.info("read short circuit is ON for user " + curUser);
      // read short circuit, for hdfs
      conf.set("dfs.block.local-path-access.user", curUser);
      // read short circuit, for hbase
      conf.setBoolean("dfs.client.read.shortcircuit", true);
      // Skip checking checksum, for the hdfs client and the datanode
      conf.setBoolean("dfs.client.read.shortcircuit.skip.checksum", true);
    } else {
      LOG.info("read short circuit is OFF");
    }
  }

  private String createDirAndSetProperty(final String relPath, String property) {
    String path = getDataTestDir(relPath).toString();
    System.setProperty(property, path);
    conf.set(property, path);
    new File(path).mkdirs();
    LOG.info("Setting " + property + " to " + path + " in system properties and HBase conf");
    return path;
  }

  /**
   * Shuts down instance created by call to {@link #startMiniDFSCluster(int)}
   * or does nothing.
   * @throws IOException
   */
  public void shutdownMiniDFSCluster() throws IOException {
    if (this.dfsCluster != null) {
      // The below throws an exception per dn, AsynchronousCloseException.
      this.dfsCluster.shutdown();
      dfsCluster = null;
      dataTestDirOnTestFS = null;
      FSUtils.setFsDefault(this.conf, new Path("file:///"));
    }
  }


  /**
   * Start up a minicluster of hbase, dfs, and zookeeper.
   * @throws Exception
   * @return Mini hbase cluster instance created.
   * @see #shutdownMiniCluster()
   */
  public MiniHBaseCluster startMiniCluster() throws Exception {
    return startMiniCluster(1, 1);
  }

  /**
   * Start up a minicluster of hbase, dfs, and zookeeper where the WAL dir is created separately.
   * @throws Exception
   * @return Mini hbase cluster instance created.
   * @see #shutdownMiniCluster()
   */
  public MiniHBaseCluster startMiniCluster(boolean withWALDir) throws Exception {
    return startMiniCluster(1, 1, 1, null, null, null, false, withWALDir);
  }

  /**
   * Start up a minicluster of hbase, dfs, and zookeeper.
   * Set the <code>create</code> flag to control whether the root or data directory path
   * is created (it will be overwritten if the dir already exists).
   * @throws Exception
   * @return Mini hbase cluster instance created.
   * @see #shutdownMiniCluster()
   */
  public MiniHBaseCluster startMiniCluster(final int numSlaves, boolean create)
  throws Exception {
    return startMiniCluster(1, numSlaves, create);
  }

  /**
   * Start up a minicluster of hbase, optionally dfs, and zookeeper.
   * Modifies Configuration.  Homes the cluster data directory under a random
   * subdirectory in a directory under System property test.build.data.
   * Directory is cleaned up on exit.
   * @param numSlaves Number of slaves to start up.  We'll start this many
   * datanodes and regionservers.  If numSlaves is > 1, then make sure
   * hbase.regionserver.info.port is -1 (i.e. no ui per regionserver) otherwise
   * you will get bind errors.
   * @throws Exception
   * @see #shutdownMiniCluster()
   * @return Mini hbase cluster instance created.
   */
  public MiniHBaseCluster startMiniCluster(final int numSlaves)
  throws Exception {
    return startMiniCluster(1, numSlaves, false);
  }

  public MiniHBaseCluster startMiniCluster(final int numSlaves, boolean create, boolean withWALDir)
          throws Exception {
    return startMiniCluster(1, numSlaves, numSlaves, null, null, null, create, withWALDir);
  }

  /**
   * Start minicluster. Whether to create a new root or data dir path even if such a path
   * has been created earlier is decided based on the flag <code>create</code>.
   * @throws Exception
   * @see #shutdownMiniCluster()
   * @return Mini hbase cluster instance created.
   */
  public MiniHBaseCluster startMiniCluster(final int numMasters,
      final int numSlaves, boolean create)
    throws Exception {
      return startMiniCluster(numMasters, numSlaves, null, create);
  }

  /**
   * Start minicluster.
   * @throws Exception
   * @see #shutdownMiniCluster()
   * @return Mini hbase cluster instance created.
   */
  public MiniHBaseCluster startMiniCluster(final int numMasters,
    final int numSlaves)
  throws Exception {
    return startMiniCluster(numMasters, numSlaves, null, false);
  }

  public MiniHBaseCluster startMiniCluster(final int numMasters,
      final int numSlaves, final String[] dataNodeHosts, boolean create)
      throws Exception {
    return startMiniCluster(numMasters, numSlaves, numSlaves, dataNodeHosts,
        null, null, create, false);
  }

  /**
   * Start up a minicluster of hbase, optionally dfs, and zookeeper.
   * Modifies Configuration.  Homes the cluster data directory under a random
   * subdirectory in a directory under System property test.build.data.
   * Directory is cleaned up on exit.
   * @param numMasters Number of masters to start up.  We'll start this many
   * hbase masters.  If numMasters > 1, you can find the active/primary master
   * with {@link MiniHBaseCluster#getMaster()}.
   * @param numSlaves Number of slaves to start up.  We'll start this many
   * regionservers. If dataNodeHosts == null, this also indicates the number of
   * datanodes to start. If dataNodeHosts != null, the number of datanodes is
   * based on dataNodeHosts.length.
   * If numSlaves is > 1, then make sure
   * hbase.regionserver.info.port is -1 (i.e. no ui per regionserver) otherwise
   * you will get bind errors.
   * @param dataNodeHosts hostnames of the DNs to run on.
   * This is useful if you want to run datanodes on distinct hosts for things
   * like HDFS block location verification.
   * If you start MiniDFSCluster without host names,
   * all instances of the datanodes will have the same host name.
   * @throws Exception
   * @see #shutdownMiniCluster()
   * @return Mini hbase cluster instance created.
   */
  public MiniHBaseCluster startMiniCluster(final int numMasters,
      final int numSlaves, final String[] dataNodeHosts) throws Exception {
    return startMiniCluster(numMasters, numSlaves, numSlaves, dataNodeHosts,
        null, null);
  }

  /**
   * Same as {@link #startMiniCluster(int, int)}, but with a custom number of datanodes.
   * @param numDataNodes Number of data nodes.
   */
  public MiniHBaseCluster startMiniCluster(final int numMasters,
      final int numSlaves, final int numDataNodes) throws Exception {
    return startMiniCluster(numMasters, numSlaves, numDataNodes, null, null, null);
  }

  /**
   * Start up a minicluster of hbase, optionally dfs, and zookeeper.
   * Modifies Configuration.  Homes the cluster data directory under a random
   * subdirectory in a directory under System property test.build.data.
   * Directory is cleaned up on exit.
   * @param numMasters Number of masters to start up.  We'll start this many
   * hbase masters.  If numMasters > 1, you can find the active/primary master
   * with {@link MiniHBaseCluster#getMaster()}.
   * @param numSlaves Number of slaves to start up.  We'll start this many
   * regionservers. If dataNodeHosts == null, this also indicates the number of
   * datanodes to start. If dataNodeHosts != null, the number of datanodes is
   * based on dataNodeHosts.length.
   * If numSlaves is > 1, then make sure
   * hbase.regionserver.info.port is -1 (i.e. no ui per regionserver) otherwise
   * you will get bind errors.
   * @param dataNodeHosts hostnames of the DNs to run on.
   * This is useful if you want to run datanodes on distinct hosts for things
   * like HDFS block location verification.
   * If you start MiniDFSCluster without host names,
   * all instances of the datanodes will have the same host name.
   * @param masterClass The class to use as HMaster, or null for default
   * @param regionserverClass The class to use as HRegionServer, or null for
   * default
   * @throws Exception
   * @see #shutdownMiniCluster()
   * @return Mini hbase cluster instance created.
   */
  public MiniHBaseCluster startMiniCluster(final int numMasters,
      final int numSlaves, final String[] dataNodeHosts, Class<? extends HMaster> masterClass,
      Class<? extends MiniHBaseCluster.MiniHBaseClusterRegionServer> regionserverClass)
          throws Exception {
    return startMiniCluster(
        numMasters, numSlaves, numSlaves, dataNodeHosts, masterClass, regionserverClass);
  }

  public MiniHBaseCluster startMiniCluster(final int numMasters,
      final int numSlaves, int numDataNodes, final String[] dataNodeHosts,
      Class<? extends HMaster> masterClass,
      Class<? extends MiniHBaseCluster.MiniHBaseClusterRegionServer> regionserverClass)
    throws Exception {
    return startMiniCluster(numMasters, numSlaves, numDataNodes, dataNodeHosts,
        masterClass, regionserverClass, false, false);
  }

  /**
   * Same as {@link #startMiniCluster(int, int, String[], Class, Class)}, but with a custom
   * number of datanodes.
   * @param numDataNodes Number of data nodes.
   * @param create Set this flag to control whether a new root or data directory path
   * is created (it will be overwritten if it exists already).
   */
  public MiniHBaseCluster startMiniCluster(final int numMasters,
    final int numSlaves, int numDataNodes, final String[] dataNodeHosts,
    Class<? extends HMaster> masterClass,
    Class<? extends MiniHBaseCluster.MiniHBaseClusterRegionServer> regionserverClass,
    boolean create, boolean withWALDir)
  throws Exception {
    if (dataNodeHosts != null && dataNodeHosts.length != 0) {
      numDataNodes = dataNodeHosts.length;
    }

    LOG.info("Starting up minicluster with " + numMasters + " master(s) and " +
        numSlaves + " regionserver(s) and " + numDataNodes + " datanode(s)");

    // If we already put up a cluster, fail.
    if (miniClusterRunning) {
      throw new IllegalStateException("A mini-cluster is already running");
    }
    miniClusterRunning = true;

    setupClusterTestDir();
    System.setProperty(TEST_DIRECTORY_KEY, this.clusterTestDir.getPath());

    // Bring up mini dfs cluster. This spews a bunch of warnings about missing
    // scheme. Complaints are 'Scheme is undefined for build/test/data/dfs/name1'.
    if (this.dfsCluster == null) {
      LOG.info("STARTING DFS");
      dfsCluster = startMiniDFSCluster(numDataNodes, dataNodeHosts);
    } else {
      LOG.info("NOT STARTING DFS");
    }

    // Start up a zk cluster.
    if (getZkCluster() == null) {
      startMiniZKCluster();
    }

    // Start the MiniHBaseCluster
    return startMiniHBaseCluster(numMasters, numSlaves, null, masterClass,
      regionserverClass, create, withWALDir);
  }

  public MiniHBaseCluster startMiniHBaseCluster(final int numMasters, final int numSlaves)
      throws IOException, InterruptedException {
    return startMiniHBaseCluster(numMasters, numSlaves, null);
  }

  public MiniHBaseCluster startMiniHBaseCluster(final int numMasters, final int numSlaves,
      List<Integer> rsPorts) throws IOException, InterruptedException {
    return startMiniHBaseCluster(numMasters, numSlaves, rsPorts, null, null, false, false);
  }

  /**
   * Starts up mini hbase cluster.  Usually used after call to
   * {@link #startMiniCluster(int, int)} when doing stepped startup of clusters.
   * Usually you won't want this.  You'll usually want {@link #startMiniCluster()}.
   * @param rsPorts Ports that the RegionServers should use; pass ports if you want to test
   *                cluster restart where for sure the regionservers come up on the same
   *                address+port (but just with a different startcode); by default mini hbase
   *                clusters choose new arbitrary ports on each cluster start.
   * @param create Whether to create a
   * root or data directory path or not; will overwrite if exists already.
   * @return Reference to the mini hbase cluster.
   * @throws IOException
   * @throws InterruptedException
   * @see #startMiniCluster()
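   *
   * <p>Stepped-startup sketch (illustrative; assumes dfs and zk are brought up
   * first by the caller):
   * <pre>
   *   util.startMiniDFSCluster(1);
   *   util.startMiniZKCluster();
   *   util.startMiniHBaseCluster(1, 1);
   *   // ... run the test ...
   *   util.shutdownMiniHBaseCluster();
   * </pre>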
   */
  public MiniHBaseCluster startMiniHBaseCluster(final int numMasters,
        final int numSlaves, List<Integer> rsPorts, Class<? extends HMaster> masterClass,
        Class<? extends MiniHBaseCluster.MiniHBaseClusterRegionServer> regionserverClass,
        boolean create, boolean withWALDir)
  throws IOException, InterruptedException {
    // Now do the mini hbase cluster.  Set the hbase.rootdir in config.
    createRootDir(create);
    if (withWALDir) {
      createWALRootDir();
    }
    // Set the hbase.fs.tmp.dir config to make sure that we have some default value. This is
    // for tests that do not read hbase-defaults.xml
    setHBaseFsTmpDir();

    // These settings will make the master wait until this exact number of
    // region servers is connected.
    if (conf.getInt(ServerManager.WAIT_ON_REGIONSERVERS_MINTOSTART, -1) == -1) {
      conf.setInt(ServerManager.WAIT_ON_REGIONSERVERS_MINTOSTART, numSlaves);
    }
    if (conf.getInt(ServerManager.WAIT_ON_REGIONSERVERS_MAXTOSTART, -1) == -1) {
      conf.setInt(ServerManager.WAIT_ON_REGIONSERVERS_MAXTOSTART, numSlaves);
    }

    Configuration c = new Configuration(this.conf);
    TraceUtil.initTracer(c);
    this.hbaseCluster =
        new MiniHBaseCluster(c, numMasters, numSlaves, rsPorts, masterClass, regionserverClass);
    // Don't leave here till we've done a successful scan of the hbase:meta
    Table t = getConnection().getTable(TableName.META_TABLE_NAME);
    ResultScanner s = t.getScanner(new Scan());
    while (s.next() != null) {
      continue;
    }
    s.close();
    t.close();

    getAdmin(); // create the hbaseAdmin immediately
    LOG.info("Minicluster is up; activeMaster=" + this.getHBaseCluster().getMaster());

    return (MiniHBaseCluster)this.hbaseCluster;
  }

  /**
   * Starts the hbase cluster up again after shutting it down previously in a
   * test.  Use this if you want to keep dfs/zk up and just stop/start hbase.
   * @param servers number of region servers
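   *
   * <p>Sketch (illustrative): stop and restart hbase while keeping dfs/zk up:
   * <pre>
   *   util.shutdownMiniHBaseCluster();
   *   util.restartHBaseCluster(2);
   * </pre>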
   */
  public void restartHBaseCluster(int servers) throws IOException, InterruptedException {
    if (hbaseAdmin != null) {
      hbaseAdmin.close();
      hbaseAdmin = null;
    }
    if (this.connection != null) {
      this.connection.close();
      this.connection = null;
    }
    this.hbaseCluster = new MiniHBaseCluster(this.conf, servers);
    // Don't leave here till we've done a successful scan of the hbase:meta
    Connection conn = ConnectionFactory.createConnection(this.conf);
    Table t = conn.getTable(TableName.META_TABLE_NAME);
    ResultScanner s = t.getScanner(new Scan());
    while (s.next() != null) {
      // do nothing
    }
    LOG.info("HBase has been restarted");
    s.close();
    t.close();
    conn.close();
  }

  /**
   * @return Current mini hbase cluster. Only has something in it after a call
   * to {@link #startMiniCluster()}.
   * @see #startMiniCluster()
   */
  public MiniHBaseCluster getMiniHBaseCluster() {
    if (this.hbaseCluster == null || this.hbaseCluster instanceof MiniHBaseCluster) {
      return (MiniHBaseCluster)this.hbaseCluster;
    }
    throw new RuntimeException(hbaseCluster + " not an instance of " +
                               MiniHBaseCluster.class.getName());
  }

  /**
   * Stops mini hbase, zk, and hdfs clusters.
   * @throws IOException
   * @see #startMiniCluster(int)
   */
  public void shutdownMiniCluster() throws Exception {
    LOG.info("Shutting down minicluster");
    shutdownMiniHBaseCluster();
    shutdownMiniDFSCluster();
    shutdownMiniZKCluster();

    cleanupTestDir();
    miniClusterRunning = false;
    LOG.info("Minicluster is down");
  }

  /**
   * Shutdown HBase mini cluster. Does not shutdown zk or dfs if running.
   * @throws java.io.IOException in case the command is unsuccessful
   */
  public void shutdownMiniHBaseCluster() throws IOException {
    cleanup();
    if (this.hbaseCluster != null) {
      this.hbaseCluster.shutdown();
      // Wait till hbase is down before going on to shutdown zk.
      this.hbaseCluster.waitUntilShutDown();
      this.hbaseCluster = null;
    }
    if (zooKeeperWatcher != null) {
      zooKeeperWatcher.close();
      zooKeeperWatcher = null;
    }
  }

  /**
   * Abruptly shutdown HBase mini cluster. Does not shutdown zk or dfs if running.
   * @throws java.io.IOException in case the command is unsuccessful
   */
  public void killMiniHBaseCluster() throws IOException {
    cleanup();
    if (this.hbaseCluster != null) {
      getMiniHBaseCluster().killAll();
      this.hbaseCluster = null;
    }
    if (zooKeeperWatcher != null) {
      zooKeeperWatcher.close();
      zooKeeperWatcher = null;
    }
  }

  // close hbase admin, close current connection and reset MIN MAX configs for RS.
  private void cleanup() throws IOException {
    if (hbaseAdmin != null) {
      hbaseAdmin.close();
      hbaseAdmin = null;
    }
    if (this.connection != null) {
      this.connection.close();
      this.connection = null;
    }
    // unset the configuration for MIN and MAX RS to start
    conf.setInt(ServerManager.WAIT_ON_REGIONSERVERS_MINTOSTART, -1);
    conf.setInt(ServerManager.WAIT_ON_REGIONSERVERS_MAXTOSTART, -1);
  }

  /**
   * Returns the path to the default root dir the minicluster uses. If <code>create</code>
   * is true, a new root directory path is fetched irrespective of whether it has been fetched
   * before or not. If false, the previous path is used.
   * Note: this does not cause the root dir to be created.
   * @return Fully qualified path for the default hbase root dir
   * @throws IOException
   */
  public Path getDefaultRootDirPath(boolean create) throws IOException {
    if (!create) {
      return getDataTestDirOnTestFS();
    } else {
      return getNewDataTestDirOnTestFS();
    }
  }

  /**
   * Same as {@link HBaseTestingUtility#getDefaultRootDirPath(boolean create)}
   * except that the <code>create</code> flag is false.
   * Note: this does not cause the root dir to be created.
   * @return Fully qualified path for the default hbase root dir
   * @throws IOException
   */
  public Path getDefaultRootDirPath() throws IOException {
    return getDefaultRootDirPath(false);
  }

  /**
   * Creates an hbase rootdir in the user's home directory.  Also creates the hbase
   * version file.  Normally you won't make use of this method.  The root hbasedir
   * is created for you as part of mini cluster startup.  You'd only use this
   * method if you were doing manual operation.
   * @param create This flag decides whether to get a new
   * root or data directory path or not, if it has been fetched already.
   * Note: the directory will be made irrespective of whether the path has been fetched or not.
   * If the directory already exists, it will be overwritten
   * @return Fully qualified path to hbase root dir
   * @throws IOException
   */
  public Path createRootDir(boolean create) throws IOException {
    FileSystem fs = FileSystem.get(this.conf);
    Path hbaseRootdir = getDefaultRootDirPath(create);
    FSUtils.setRootDir(this.conf, hbaseRootdir);
    fs.mkdirs(hbaseRootdir);
    FSUtils.setVersion(fs, hbaseRootdir);
    return hbaseRootdir;
  }

  /**
   * Same as {@link HBaseTestingUtility#createRootDir(boolean create)}
   * except that the <code>create</code> flag is false.
   * @return Fully qualified path to hbase root dir
   * @throws IOException
   */
  public Path createRootDir() throws IOException {
    return createRootDir(false);
  }

  /**
   * Creates an hbase WAL dir in the user's home directory.
   * Normally you won't make use of this method. The root hbaseWALDir
   * is created for you as part of mini cluster startup. You'd only use this
   * method if you were doing manual operation.
   *
   * @return Fully qualified path to the hbase WAL root dir
   * @throws IOException
  */
  public Path createWALRootDir() throws IOException {
    FileSystem fs = FileSystem.get(this.conf);
    Path walDir = getNewDataTestDirOnTestFS();
    FSUtils.setWALRootDir(this.conf, walDir);
    fs.mkdirs(walDir);
    return walDir;
  }

  private void setHBaseFsTmpDir() throws IOException {
    String hbaseFsTmpDirInString = this.conf.get("hbase.fs.tmp.dir");
    if (hbaseFsTmpDirInString == null) {
      this.conf.set("hbase.fs.tmp.dir", getDataTestDirOnTestFS("hbase-staging").toString());
      LOG.info("Setting hbase.fs.tmp.dir to " + this.conf.get("hbase.fs.tmp.dir"));
    } else {
      LOG.info("The hbase.fs.tmp.dir is set to " + hbaseFsTmpDirInString);
    }
  }

  /**
   * Flushes all caches in the mini hbase cluster
   * @throws IOException
   */
  public void flush() throws IOException {
    getMiniHBaseCluster().flushcache();
  }

  /**
   * Flushes all caches of the specified table in the mini hbase cluster
   * @throws IOException
   */
  public void flush(TableName tableName) throws IOException {
    getMiniHBaseCluster().flushcache(tableName);
  }

  /**
   * Compact all regions in the mini hbase cluster
   * @throws IOException
   */
  public void compact(boolean major) throws IOException {
    getMiniHBaseCluster().compact(major);
  }

  /**
   * Compact all of a table's regions in the mini hbase cluster
   * @throws IOException
   */
  public void compact(TableName tableName, boolean major) throws IOException {
    getMiniHBaseCluster().compact(tableName, major);
  }

  /**
   * Create a table.
   * @param tableName the table name
   * @param family the column family name
   * @return A Table instance for the created table.
   * @throws IOException
   */
  public Table createTable(TableName tableName, String family)
  throws IOException {
    return createTable(tableName, new String[]{family});
  }

  /**
   * Create a table.
   * @param tableName the table name
   * @param families the column family names
   * @return A Table instance for the created table.
   * @throws IOException
   */
  public Table createTable(TableName tableName, String[] families)
  throws IOException {
    List<byte[]> fams = new ArrayList<>(families.length);
    for (String family : families) {
      fams.add(Bytes.toBytes(family));
    }
    return createTable(tableName, fams.toArray(new byte[0][]));
  }

  /**
   * Create a table.
   * @param tableName the table name
   * @param family the column family
   * @return A Table instance for the created table.
   * @throws IOException
   */
  public Table createTable(TableName tableName, byte[] family)
  throws IOException {
    return createTable(tableName, new byte[][]{family});
  }
  /**
   * Create a table with multiple regions.
   * @param tableName the table name
   * @param family the column family
   * @param numRegions the number of regions; must be at least 3
   * @return A Table instance for the created table.
   * @throws IOException
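   *
   * <p>Example (sketch; the region count is arbitrary):
   * <pre>
   *   Table t = util.createMultiRegionTable(TableName.valueOf("tbl"),
   *       Bytes.toBytes("fam"), 10);
   * </pre>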
   */
  public Table createMultiRegionTable(TableName tableName, byte[] family, int numRegions)
      throws IOException {
    if (numRegions < 3) throw new IOException("Must create at least 3 regions");
    byte[] startKey = Bytes.toBytes("aaaaa");
    byte[] endKey = Bytes.toBytes("zzzzz");
    byte[][] splitKeys = Bytes.split(startKey, endKey, numRegions - 3);

    return createTable(tableName, new byte[][] { family }, splitKeys);
  }

  /**
   * Create a table.
   * @param tableName the table name
   * @param families the column families
   * @return A Table instance for the created table.
   * @throws IOException
   */
  public Table createTable(TableName tableName, byte[][] families)
  throws IOException {
    return createTable(tableName, families, (byte[][]) null);
  }

  /**
   * Create a table with multiple regions.
   * @param tableName the table name
   * @param families the column families
   * @return A Table instance for the created table.
   * @throws IOException
   */
  public Table createMultiRegionTable(TableName tableName, byte[][] families) throws IOException {
    return createTable(tableName, families, KEYS_FOR_HBA_CREATE_TABLE);
  }
1380  /**
1381   * Create a table.
1382   * @param tableName
1383   * @param families
1384   * @param splitKeys
1385   * @return A Table instance for the created table.
1386   * @throws IOException
1387   */
1388  public Table createTable(TableName tableName, byte[][] families, byte[][] splitKeys)
1389      throws IOException {
1390    return createTable(tableName, families, splitKeys, 1, new Configuration(getConfiguration()));
1391  }
1392
1393  /**
1394   * Create a table.
1395   * @param tableName the table name
1396   * @param families the families
1397   * @param splitKeys the splitkeys
1398   * @param replicaCount the region replica count
1399   * @return A Table instance for the created table.
   * @throws IOException if the table cannot be created
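   * <p>For example, to create a table whose regions each have three replicas (names and
   * count are illustrative):
   * <pre>{@code
   * Table t = TEST_UTIL.createTable(TableName.valueOf("testReplicas"),
   *   new byte[][] { Bytes.toBytes("cf") }, (byte[][]) null, 3);
   * }</pre>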
1401   */
1402  public Table createTable(TableName tableName, byte[][] families, byte[][] splitKeys,
1403      int replicaCount) throws IOException {
1404    return createTable(tableName, families, splitKeys, replicaCount,
1405      new Configuration(getConfiguration()));
1406  }
1407
1408  public Table createTable(TableName tableName, byte[][] families,
1409      int numVersions, byte[] startKey, byte[] endKey, int numRegions)
1410  throws IOException{
1411    HTableDescriptor desc = createTableDescriptor(tableName, families, numVersions);
1412
1413    getAdmin().createTable(desc, startKey, endKey, numRegions);
    // HBaseAdmin only waits for regions to appear in hbase:meta;
    // we should wait until they are assigned
1416    waitUntilAllRegionsAssigned(tableName);
1417    return getConnection().getTable(tableName);
1418  }
1419
1420  /**
1421   * Create a table.
1422   * @param htd
1423   * @param families
1424   * @param c Configuration to use
1425   * @return A Table instance for the created table.
1426   * @throws IOException
1427   */
1428  public Table createTable(TableDescriptor htd, byte[][] families, Configuration c)
1429  throws IOException {
1430    return createTable(htd, families, null, c);
1431  }
1432
1433  /**
1434   * Create a table.
1435   * @param htd table descriptor
1436   * @param families array of column families
1437   * @param splitKeys array of split keys
1438   * @param c Configuration to use
1439   * @return A Table instance for the created table.
1440   * @throws IOException if getAdmin or createTable fails
1441   */
1442  public Table createTable(TableDescriptor htd, byte[][] families, byte[][] splitKeys,
1443      Configuration c) throws IOException {
1444    // Disable blooms (they are on by default as of 0.95) but we disable them here because
1445    // tests have hard coded counts of what to expect in block cache, etc., and blooms being
1446    // on is interfering.
1447    return createTable(htd, families, splitKeys, BloomType.NONE, HConstants.DEFAULT_BLOCKSIZE, c);
1448  }
1449
1450  /**
1451   * Create a table.
1452   * @param htd table descriptor
1453   * @param families array of column families
1454   * @param splitKeys array of split keys
1455   * @param type Bloom type
1456   * @param blockSize block size
1457   * @param c Configuration to use
1458   * @return A Table instance for the created table.
1459   * @throws IOException if getAdmin or createTable fails
   */
1462  public Table createTable(TableDescriptor htd, byte[][] families, byte[][] splitKeys,
1463      BloomType type, int blockSize, Configuration c) throws IOException {
1464    TableDescriptorBuilder builder = TableDescriptorBuilder.newBuilder(htd);
1465    for (byte[] family : families) {
1466      ColumnFamilyDescriptorBuilder cfdb = ColumnFamilyDescriptorBuilder.newBuilder(family)
1467        .setBloomFilterType(type)
1468        .setBlocksize(blockSize);
1469      if (isNewVersionBehaviorEnabled()) {
1470          cfdb.setNewVersionBehavior(true);
1471      }
1472      builder.setColumnFamily(cfdb.build());
1473    }
1474    TableDescriptor td = builder.build();
1475    getAdmin().createTable(td, splitKeys);
    // HBaseAdmin only waits for regions to appear in hbase:meta;
    // we should wait until they are assigned
1478    waitUntilAllRegionsAssigned(td.getTableName());
1479    return getConnection().getTable(td.getTableName());
1480  }
1481
1482  /**
1483   * Create a table.
1484   * @param htd table descriptor
1485   * @param splitRows array of split keys
1486   * @return A Table instance for the created table.
1487   * @throws IOException
1488   */
1489  public Table createTable(TableDescriptor htd, byte[][] splitRows)
1490      throws IOException {
1491    TableDescriptorBuilder builder = TableDescriptorBuilder.newBuilder(htd);
1492    if (isNewVersionBehaviorEnabled()) {
1493      for (ColumnFamilyDescriptor family : htd.getColumnFamilies()) {
1494         builder.setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(family)
1495           .setNewVersionBehavior(true).build());
1496      }
1497    }
1498    getAdmin().createTable(builder.build(), splitRows);
    // HBaseAdmin only waits for regions to appear in hbase:meta;
    // we should wait until they are assigned
1501    waitUntilAllRegionsAssigned(htd.getTableName());
1502    return getConnection().getTable(htd.getTableName());
1503  }
1504
1505  /**
1506   * Create a table.
1507   * @param tableName the table name
1508   * @param families the families
1509   * @param splitKeys the split keys
1510   * @param replicaCount the replica count
1511   * @param c Configuration to use
1512   * @return A Table instance for the created table.
1513   * @throws IOException
1514   */
1515  public Table createTable(TableName tableName, byte[][] families, byte[][] splitKeys,
1516      int replicaCount, final Configuration c) throws IOException {
1517    HTableDescriptor htd = new HTableDescriptor(tableName);
1518    htd.setRegionReplication(replicaCount);
1519    return createTable(htd, families, splitKeys, c);
1520  }
1521
1522  /**
1523   * Create a table.
1524   * @param tableName
1525   * @param family
1526   * @param numVersions
1527   * @return A Table instance for the created table.
1528   * @throws IOException
1529   */
1530  public Table createTable(TableName tableName, byte[] family, int numVersions)
1531  throws IOException {
1532    return createTable(tableName, new byte[][]{family}, numVersions);
1533  }
1534
1535  /**
1536   * Create a table.
1537   * @param tableName
1538   * @param families
1539   * @param numVersions
1540   * @return A Table instance for the created table.
1541   * @throws IOException
1542   */
1543  public Table createTable(TableName tableName, byte[][] families, int numVersions)
1544      throws IOException {
1545    return createTable(tableName, families, numVersions, (byte[][]) null);
1546  }
1547
1548  /**
1549   * Create a table.
1550   * @param tableName
1551   * @param families
1552   * @param numVersions
1553   * @param splitKeys
1554   * @return A Table instance for the created table.
1555   * @throws IOException
1556   */
1557  public Table createTable(TableName tableName, byte[][] families, int numVersions,
1558      byte[][] splitKeys) throws IOException {
1559    HTableDescriptor desc = new HTableDescriptor(tableName);
1560    for (byte[] family : families) {
1561      HColumnDescriptor hcd = new HColumnDescriptor(family).setMaxVersions(numVersions);
1562      if (isNewVersionBehaviorEnabled()) {
1563        hcd.setNewVersionBehavior(true);
1564      }
1565      desc.addFamily(hcd);
1566    }
1567    getAdmin().createTable(desc, splitKeys);
    // HBaseAdmin only waits for regions to appear in hbase:meta; we should wait until they are
    // assigned
1570    waitUntilAllRegionsAssigned(tableName);
1571    return getConnection().getTable(tableName);
1572  }
1573
1574  /**
1575   * Create a table with multiple regions.
1576   * @param tableName
1577   * @param families
1578   * @param numVersions
1579   * @return A Table instance for the created table.
1580   * @throws IOException
1581   */
1582  public Table createMultiRegionTable(TableName tableName, byte[][] families, int numVersions)
1583      throws IOException {
1584    return createTable(tableName, families, numVersions, KEYS_FOR_HBA_CREATE_TABLE);
1585  }
1586
1587  /**
1588   * Create a table.
1589   * @param tableName
1590   * @param families
1591   * @param numVersions
1592   * @param blockSize
1593   * @return A Table instance for the created table.
1594   * @throws IOException
1595   */
1596  public Table createTable(TableName tableName, byte[][] families,
1597    int numVersions, int blockSize) throws IOException {
1598    HTableDescriptor desc = new HTableDescriptor(tableName);
1599    for (byte[] family : families) {
1600      HColumnDescriptor hcd = new HColumnDescriptor(family)
1601          .setMaxVersions(numVersions)
1602          .setBlocksize(blockSize);
1603      if (isNewVersionBehaviorEnabled()) {
1604        hcd.setNewVersionBehavior(true);
1605      }
1606      desc.addFamily(hcd);
1607    }
1608    getAdmin().createTable(desc);
    // HBaseAdmin only waits for regions to appear in hbase:meta; we should wait until they are
    // assigned
1611    waitUntilAllRegionsAssigned(tableName);
1612    return getConnection().getTable(tableName);
1613  }
1614
  public Table createTable(TableName tableName, byte[][] families,
      int numVersions, int blockSize, String cpName) throws IOException {
    HTableDescriptor desc = new HTableDescriptor(tableName);
    for (byte[] family : families) {
      HColumnDescriptor hcd = new HColumnDescriptor(family)
          .setMaxVersions(numVersions)
          .setBlocksize(blockSize);
      if (isNewVersionBehaviorEnabled()) {
        hcd.setNewVersionBehavior(true);
      }
      desc.addFamily(hcd);
    }
    if (cpName != null) {
      desc.addCoprocessor(cpName);
    }
    getAdmin().createTable(desc);
    // HBaseAdmin only waits for regions to appear in hbase:meta; we should wait until they are
    // assigned
    waitUntilAllRegionsAssigned(tableName);
    return getConnection().getTable(tableName);
  }
1636
1637  /**
1638   * Create a table.
1639   * @param tableName
1640   * @param families
1641   * @param numVersions
1642   * @return A Table instance for the created table.
1643   * @throws IOException
1644   */
1645  public Table createTable(TableName tableName, byte[][] families,
1646      int[] numVersions)
1647  throws IOException {
1648    HTableDescriptor desc = new HTableDescriptor(tableName);
1649    int i = 0;
1650    for (byte[] family : families) {
1651      HColumnDescriptor hcd = new HColumnDescriptor(family)
1652          .setMaxVersions(numVersions[i]);
1653      if (isNewVersionBehaviorEnabled()) {
1654        hcd.setNewVersionBehavior(true);
1655      }
1656      desc.addFamily(hcd);
1657      i++;
1658    }
1659    getAdmin().createTable(desc);
    // HBaseAdmin only waits for regions to appear in hbase:meta; we should wait until they are
    // assigned
1662    waitUntilAllRegionsAssigned(tableName);
1663    return getConnection().getTable(tableName);
1664  }
1665
1666  /**
1667   * Create a table.
1668   * @param tableName
1669   * @param family
1670   * @param splitRows
1671   * @return A Table instance for the created table.
1672   * @throws IOException
1673   */
1674  public Table createTable(TableName tableName, byte[] family, byte[][] splitRows)
1675      throws IOException {
1676    HTableDescriptor desc = new HTableDescriptor(tableName);
1677    HColumnDescriptor hcd = new HColumnDescriptor(family);
1678    if (isNewVersionBehaviorEnabled()) {
1679      hcd.setNewVersionBehavior(true);
1680    }
1681    desc.addFamily(hcd);
1682    getAdmin().createTable(desc, splitRows);
    // HBaseAdmin only waits for regions to appear in hbase:meta; we should wait until they are
    // assigned
1685    waitUntilAllRegionsAssigned(tableName);
1686    return getConnection().getTable(tableName);
1687  }
1688
1689  /**
1690   * Create a table with multiple regions.
1691   * @param tableName
1692   * @param family
1693   * @return A Table instance for the created table.
1694   * @throws IOException
1695   */
1696  public Table createMultiRegionTable(TableName tableName, byte[] family) throws IOException {
1697    return createTable(tableName, family, KEYS_FOR_HBA_CREATE_TABLE);
1698  }
1699
1700  /**
1701   * Modify a table, synchronous. Waiting logic similar to that of {@code admin.rb#alter_status}.
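   * <p>A minimal usage sketch (the descriptor values are illustrative):
   * <pre>{@code
   * TableDescriptor desc = TableDescriptorBuilder
   *   .newBuilder(TableName.valueOf("testModify"))
   *   .setColumnFamily(ColumnFamilyDescriptorBuilder.of("cf"))
   *   .build();
   * HBaseTestingUtility.modifyTableSync(admin, desc);
   * }</pre>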
1702   */
1703  @SuppressWarnings("serial")
1704  public static void modifyTableSync(Admin admin, TableDescriptor desc)
1705      throws IOException, InterruptedException {
1706    admin.modifyTable(desc);
1707    Pair<Integer, Integer> status = new Pair<Integer, Integer>() {{
1708      setFirst(0);
1709      setSecond(0);
1710    }};
1711    int i = 0;
1712    do {
1713      status = admin.getAlterStatus(desc.getTableName());
1714      if (status.getSecond() != 0) {
1715        LOG.debug(status.getSecond() - status.getFirst() + "/" + status.getSecond()
1716          + " regions updated.");
1717        Thread.sleep(1 * 1000L);
1718      } else {
1719        LOG.debug("All regions updated.");
1720        break;
1721      }
1722    } while (status.getFirst() != 0 && i++ < 500);
1723    if (status.getFirst() != 0) {
1724      throw new IOException("Failed to update all regions even after 500 seconds.");
1725    }
1726  }
1727
1728  /**
1729   * Set the number of Region replicas.
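   * <p>For example, to give every region of a table three replicas (names and count are
   * illustrative):
   * <pre>{@code
   * HBaseTestingUtility.setReplicas(admin, TableName.valueOf("testReplicas"), 3);
   * }</pre>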
1730   */
1731  public static void setReplicas(Admin admin, TableName table, int replicaCount)
1732      throws IOException, InterruptedException {
1733    admin.disableTable(table);
1734    HTableDescriptor desc = new HTableDescriptor(admin.getTableDescriptor(table));
1735    desc.setRegionReplication(replicaCount);
1736    admin.modifyTable(desc.getTableName(), desc);
1737    admin.enableTable(table);
1738  }
1739
1740  /**
1741   * Drop an existing table
1742   * @param tableName existing table
1743   */
1744  public void deleteTable(TableName tableName) throws IOException {
1745    try {
1746      getAdmin().disableTable(tableName);
1747    } catch (TableNotEnabledException e) {
1748      LOG.debug("Table: " + tableName + " already disabled, so just deleting it.");
1749    }
1750    getAdmin().deleteTable(tableName);
1751  }
1752
1753  /**
1754   * Drop an existing table
1755   * @param tableName existing table
1756   */
1757  public void deleteTableIfAny(TableName tableName) throws IOException {
1758    try {
1759      deleteTable(tableName);
1760    } catch (TableNotFoundException e) {
1761      // ignore
1762    }
1763  }
1764
1765  // ==========================================================================
1766  // Canned table and table descriptor creation
1767  // TODO replace HBaseTestCase
1768
1769  public final static byte [] fam1 = Bytes.toBytes("colfamily11");
1770  public final static byte [] fam2 = Bytes.toBytes("colfamily21");
1771  public final static byte [] fam3 = Bytes.toBytes("colfamily31");
1772  public static final byte[][] COLUMNS = {fam1, fam2, fam3};
1773  private static final int MAXVERSIONS = 3;
1774
1775  public static final char FIRST_CHAR = 'a';
1776  public static final char LAST_CHAR = 'z';
1777  public static final byte [] START_KEY_BYTES = {FIRST_CHAR, FIRST_CHAR, FIRST_CHAR};
1778  public static final String START_KEY = new String(START_KEY_BYTES, HConstants.UTF8_CHARSET);
1779
1780  /**
1781   * @deprecated since 2.0.0 and will be removed in 3.0.0. Use
1782   *   {@link #createTableDescriptor(TableName, int, int, int, KeepDeletedCells)} instead.
1783   * @see #createTableDescriptor(TableName, int, int, int, KeepDeletedCells)
1784   * @see <a href="https://issues.apache.org/jira/browse/HBASE-13893">HBASE-13893</a>
1785   */
1786  @Deprecated
1787  public HTableDescriptor createTableDescriptor(final String name,
1788      final int minVersions, final int versions, final int ttl, KeepDeletedCells keepDeleted) {
1789    return this.createTableDescriptor(TableName.valueOf(name), minVersions, versions, ttl,
1790        keepDeleted);
1791  }
1792
1793  /**
   * Create a table descriptor for a table of name <code>name</code>.
   * @param name Name to give table.
   * @return Table descriptor.
1797   * @deprecated since 2.0.0 and will be removed in 3.0.0. Use
1798   *   {@link #createTableDescriptor(TableName, int, int, int, KeepDeletedCells)} instead.
1799   * @see #createTableDescriptor(TableName, int, int, int, KeepDeletedCells)
1800   * @see <a href="https://issues.apache.org/jira/browse/HBASE-13893">HBASE-13893</a>
1801   */
1802  @Deprecated
1803  public HTableDescriptor createTableDescriptor(final String name) {
1804    return createTableDescriptor(TableName.valueOf(name),  HColumnDescriptor.DEFAULT_MIN_VERSIONS,
1805        MAXVERSIONS, HConstants.FOREVER, HColumnDescriptor.DEFAULT_KEEP_DELETED);
1806  }
1807
1808  public HTableDescriptor createTableDescriptor(final TableName name,
1809      final int minVersions, final int versions, final int ttl, KeepDeletedCells keepDeleted) {
1810    HTableDescriptor htd = new HTableDescriptor(name);
1811    for (byte[] cfName : new byte[][]{ fam1, fam2, fam3 }) {
1812      HColumnDescriptor hcd = new HColumnDescriptor(cfName)
1813          .setMinVersions(minVersions)
1814          .setMaxVersions(versions)
1815          .setKeepDeletedCells(keepDeleted)
1816          .setBlockCacheEnabled(false)
1817          .setTimeToLive(ttl);
1818      if (isNewVersionBehaviorEnabled()) {
1819          hcd.setNewVersionBehavior(true);
1820      }
1821      htd.addFamily(hcd);
1822    }
1823    return htd;
1824  }
1825
1826  /**
   * Create a table descriptor for a table of name <code>name</code>.
   * @param name Name to give table.
   * @return Table descriptor.
1830   */
1831  public HTableDescriptor createTableDescriptor(final TableName name) {
1832    return createTableDescriptor(name,  HColumnDescriptor.DEFAULT_MIN_VERSIONS,
1833        MAXVERSIONS, HConstants.FOREVER, HColumnDescriptor.DEFAULT_KEEP_DELETED);
1834  }
1835
1836  public HTableDescriptor createTableDescriptor(final TableName tableName,
1837      byte[] family) {
1838    return createTableDescriptor(tableName, new byte[][] {family}, 1);
1839  }
1840
1841  public HTableDescriptor createTableDescriptor(final TableName tableName,
1842      byte[][] families, int maxVersions) {
1843    HTableDescriptor desc = new HTableDescriptor(tableName);
1844    for (byte[] family : families) {
1845      HColumnDescriptor hcd = new HColumnDescriptor(family)
1846          .setMaxVersions(maxVersions);
1847      if (isNewVersionBehaviorEnabled()) {
1848          hcd.setNewVersionBehavior(true);
1849      }
1850      desc.addFamily(hcd);
1851    }
1852    return desc;
1853  }
1854
1855  /**
1856   * Create an HRegion that writes to the local tmp dirs
1857   * @param desc a table descriptor indicating which table the region belongs to
1858   * @param startKey the start boundary of the region
1859   * @param endKey the end boundary of the region
1860   * @return a region that writes to local dir for testing
1861   * @throws IOException
1862   */
1863  public HRegion createLocalHRegion(TableDescriptor desc, byte [] startKey,
1864      byte [] endKey)
1865  throws IOException {
1866    HRegionInfo hri = new HRegionInfo(desc.getTableName(), startKey, endKey);
1867    return createLocalHRegion(hri, desc);
1868  }
1869
1870  /**
1871   * Create an HRegion that writes to the local tmp dirs. Creates the WAL for you. Be sure to call
1872   * {@link HBaseTestingUtility#closeRegionAndWAL(HRegion)} when you're finished with it.
1873   */
1874  public HRegion createLocalHRegion(RegionInfo info, TableDescriptor desc) throws IOException {
1875    return createRegionAndWAL(info, getDataTestDir(), getConfiguration(), desc);
1876  }
1877
1878  /**
1879   * Create an HRegion that writes to the local tmp dirs with specified wal
1880   * @param info regioninfo
1881   * @param desc table descriptor
1882   * @param wal wal for this region.
1883   * @return created hregion
1884   * @throws IOException
1885   */
1886  public HRegion createLocalHRegion(RegionInfo info, TableDescriptor desc, WAL wal)
1887      throws IOException {
1888    return HRegion.createHRegion(info, getDataTestDir(), getConfiguration(), desc, wal);
1889  }
1890
1891  /**
1892   * @param tableName the name of the table
1893   * @param startKey the start key of the region
1894   * @param stopKey the stop key of the region
1895   * @param callingMethod the name of the calling method probably a test method
1896   * @param conf the configuration to use
1897   * @param isReadOnly {@code true} if the table is read only, {@code false} otherwise
1898   * @param families the column families to use
1899   * @throws IOException if an IO problem is encountered
1900   * @return A region on which you must call {@link HBaseTestingUtility#closeRegionAndWAL(HRegion)}
1901   *         when done.
1902   * @deprecated since 2.0.0 and will be removed in 3.0.0. Use
1903   *   {@link #createLocalHRegion(TableName, byte[], byte[], boolean, Durability, WAL, byte[]...)}
1904   *   instead.
1905   * @see #createLocalHRegion(TableName, byte[], byte[], boolean, Durability, WAL, byte[]...)
1906   * @see <a href="https://issues.apache.org/jira/browse/HBASE-13893">HBASE-13893</a>
1907   */
1908  @Deprecated
1909  public HRegion createLocalHRegion(byte[] tableName, byte[] startKey, byte[] stopKey,
1910      String callingMethod, Configuration conf, boolean isReadOnly, Durability durability,
1911      WAL wal, byte[]... families) throws IOException {
1912    return this
1913        .createLocalHRegion(TableName.valueOf(tableName), startKey, stopKey, isReadOnly, durability,
1914            wal, families);
1915  }
1916
1917  /**
1918   * @param tableName
1919   * @param startKey
1920   * @param stopKey
1921   * @param isReadOnly
1922   * @param families
1923   * @return A region on which you must call
1924   * {@link HBaseTestingUtility#closeRegionAndWAL(HRegion)} when done.
1925   * @throws IOException
1926   */
1927  public HRegion createLocalHRegion(TableName tableName, byte[] startKey, byte[] stopKey,
1928      boolean isReadOnly, Durability durability, WAL wal, byte[]... families) throws IOException {
1929    return createLocalHRegionWithInMemoryFlags(tableName,startKey, stopKey, isReadOnly,
1930        durability, wal, null, families);
1931  }
1932
1933  public HRegion createLocalHRegionWithInMemoryFlags(TableName tableName, byte[] startKey,
1934      byte[] stopKey,
1935      boolean isReadOnly, Durability durability, WAL wal, boolean[] compactedMemStore,
1936      byte[]... families)
1937      throws IOException {
1938    HTableDescriptor htd = new HTableDescriptor(tableName);
1939    htd.setReadOnly(isReadOnly);
    int i = 0;
    for (byte[] family : families) {
      HColumnDescriptor hcd = new HColumnDescriptor(family);
      if (compactedMemStore != null && i < compactedMemStore.length) {
        hcd.setInMemoryCompaction(MemoryCompactionPolicy.BASIC);
      } else {
        hcd.setInMemoryCompaction(MemoryCompactionPolicy.NONE);
      }
      i++;
      // Keep all cell versions; tests may write many versions of the same cell.
      hcd.setMaxVersions(Integer.MAX_VALUE);
      htd.addFamily(hcd);
1953    }
1954    htd.setDurability(durability);
1955    HRegionInfo info = new HRegionInfo(htd.getTableName(), startKey, stopKey, false);
1956    return createLocalHRegion(info, htd, wal);
1957  }
1958
1959  //
1960  // ==========================================================================
1961
1962  /**
1963   * Provide an existing table name to truncate.
1964   * Scans the table and issues a delete for each row read.
1965   * @param tableName existing table
   * @return Table reference to the (now empty) table
1967   * @throws IOException
1968   */
1969  public Table deleteTableData(TableName tableName) throws IOException {
    Table table = getConnection().getTable(tableName);
    Scan scan = new Scan();
    // Close the scanner when done so it is not leaked.
    try (ResultScanner resScan = table.getScanner(scan)) {
      for (Result res : resScan) {
        table.delete(new Delete(res.getRow()));
      }
    }
    return table;
1980  }
1981
1982  /**
1983   * Truncate a table using the admin command.
1984   * Effectively disables, deletes, and recreates the table.
1985   * @param tableName table which must exist.
1986   * @param preserveRegions keep the existing split points
   * @return Table for the new table
1988   */
1989  public Table truncateTable(final TableName tableName, final boolean preserveRegions) throws
1990      IOException {
1991    Admin admin = getAdmin();
1992    if (!admin.isTableDisabled(tableName)) {
1993      admin.disableTable(tableName);
1994    }
1995    admin.truncateTable(tableName, preserveRegions);
1996    return getConnection().getTable(tableName);
1997  }
1998
1999  /**
2000   * Truncate a table using the admin command.
2001   * Effectively disables, deletes, and recreates the table.
   * For previous behavior of issuing row deletes, see
   * {@link #deleteTableData(TableName)}.
2004   * Expressly does not preserve regions of existing table.
2005   * @param tableName table which must exist.
   * @return Table for the new table
2007   */
2008  public Table truncateTable(final TableName tableName) throws IOException {
2009    return truncateTable(tableName, false);
2010  }
2011
2012  /**
2013   * Load table with rows from 'aaa' to 'zzz'.
2014   * @param t Table
2015   * @param f Family
2016   * @return Count of rows loaded.
2017   * @throws IOException
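   * <p>For example (the family name is illustrative):
   * <pre>{@code
   * int rows = TEST_UTIL.loadTable(table, Bytes.toBytes("cf"));
   * // rows == 26 * 26 * 26 == 17576, one row per three-letter key from "aaa" to "zzz"
   * }</pre>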
2018   */
2019  public int loadTable(final Table t, final byte[] f) throws IOException {
2020    return loadTable(t, new byte[][] {f});
2021  }
2022
2023  /**
2024   * Load table with rows from 'aaa' to 'zzz'.
2025   * @param t Table
   * @param f Family
   * @param writeToWAL if true use the table's default durability; if false skip the WAL
2027   * @return Count of rows loaded.
2028   * @throws IOException
2029   */
2030  public int loadTable(final Table t, final byte[] f, boolean writeToWAL) throws IOException {
2031    return loadTable(t, new byte[][] {f}, null, writeToWAL);
2032  }
2033
2034  /**
2035   * Load table of multiple column families with rows from 'aaa' to 'zzz'.
2036   * @param t Table
2037   * @param f Array of Families to load
2038   * @return Count of rows loaded.
2039   * @throws IOException
2040   */
2041  public int loadTable(final Table t, final byte[][] f) throws IOException {
2042    return loadTable(t, f, null);
2043  }
2044
2045  /**
2046   * Load table of multiple column families with rows from 'aaa' to 'zzz'.
2047   * @param t Table
2048   * @param f Array of Families to load
2049   * @param value the values of the cells. If null is passed, the row key is used as value
2050   * @return Count of rows loaded.
2051   * @throws IOException
2052   */
2053  public int loadTable(final Table t, final byte[][] f, byte[] value) throws IOException {
2054    return loadTable(t, f, value, true);
2055  }
2056
2057  /**
2058   * Load table of multiple column families with rows from 'aaa' to 'zzz'.
2059   * @param t Table
2060   * @param f Array of Families to load
   * @param value the values of the cells. If null is passed, the row key is used as value
   * @param writeToWAL if true use the table's default durability; if false skip the WAL
2062   * @return Count of rows loaded.
2063   * @throws IOException
2064   */
2065  public int loadTable(final Table t, final byte[][] f, byte[] value,
2066      boolean writeToWAL) throws IOException {
2067    List<Put> puts = new ArrayList<>();
2068    for (byte[] row : HBaseTestingUtility.ROWS) {
2069      Put put = new Put(row);
2070      put.setDurability(writeToWAL ? Durability.USE_DEFAULT : Durability.SKIP_WAL);
2071      for (int i = 0; i < f.length; i++) {
2072        byte[] value1 = value != null ? value : row;
2073        put.addColumn(f[i], f[i], value1);
2074      }
2075      puts.add(put);
2076    }
2077    t.put(puts);
2078    return puts.size();
2079  }
2080
  /** Tracks and validates table rows
   * generated with {@link HBaseTestingUtility#loadTable(Table, byte[])}
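   * <p>A minimal usage sketch (the row range is illustrative):
   * <pre>{@code
   * SeenRowTracker tracker = new SeenRowTracker(Bytes.toBytes("aaa"), Bytes.toBytes("zzz"));
   * for (Result result : scanner) {
   *   tracker.addRow(result.getRow());
   * }
   * tracker.validate(); // throws unless every row in [aaa, zzz) was seen exactly once
   * }</pre>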
2083   */
2084  public static class SeenRowTracker {
2085    int dim = 'z' - 'a' + 1;
2086    int[][][] seenRows = new int[dim][dim][dim]; //count of how many times the row is seen
2087    byte[] startRow;
2088    byte[] stopRow;
2089
2090    public SeenRowTracker(byte[] startRow, byte[] stopRow) {
2091      this.startRow = startRow;
2092      this.stopRow = stopRow;
2093    }
2094
2095    void reset() {
2096      for (byte[] row : ROWS) {
2097        seenRows[i(row[0])][i(row[1])][i(row[2])] = 0;
2098      }
2099    }
2100
2101    int i(byte b) {
2102      return b - 'a';
2103    }
2104
2105    public void addRow(byte[] row) {
2106      seenRows[i(row[0])][i(row[1])][i(row[2])]++;
2107    }
2108
2109    /** Validate that all the rows between startRow and stopRow are seen exactly once, and
     * that all other rows are not seen at all
2111     */
2112    public void validate() {
2113      for (byte b1 = 'a'; b1 <= 'z'; b1++) {
2114        for (byte b2 = 'a'; b2 <= 'z'; b2++) {
2115          for (byte b3 = 'a'; b3 <= 'z'; b3++) {
2116            int count = seenRows[i(b1)][i(b2)][i(b3)];
2117            int expectedCount = 0;
2118            if (Bytes.compareTo(new byte[] {b1,b2,b3}, startRow) >= 0
2119                && Bytes.compareTo(new byte[] {b1,b2,b3}, stopRow) < 0) {
2120              expectedCount = 1;
2121            }
2122            if (count != expectedCount) {
2123              String row = new String(new byte[] {b1,b2,b3}, StandardCharsets.UTF_8);
2124              throw new RuntimeException("Row:" + row + " has a seen count of " + count + " " +
2125                  "instead of " + expectedCount);
2126            }
2127          }
2128        }
2129      }
2130    }
2131  }
2132
2133  public int loadRegion(final HRegion r, final byte[] f) throws IOException {
2134    return loadRegion(r, f, false);
2135  }
2136
2137  public int loadRegion(final Region r, final byte[] f) throws IOException {
2138    return loadRegion((HRegion)r, f);
2139  }
2140
2141  /**
2142   * Load region with rows from 'aaa' to 'zzz'.
2143   * @param r Region
2144   * @param f Family
2145   * @param flush flush the cache if true
2146   * @return Count of rows loaded.
2147   * @throws IOException
2148   */
2149  public int loadRegion(final HRegion r, final byte[] f, final boolean flush)
2150  throws IOException {
2151    byte[] k = new byte[3];
2152    int rowCount = 0;
2153    for (byte b1 = 'a'; b1 <= 'z'; b1++) {
2154      for (byte b2 = 'a'; b2 <= 'z'; b2++) {
2155        for (byte b3 = 'a'; b3 <= 'z'; b3++) {
2156          k[0] = b1;
2157          k[1] = b2;
2158          k[2] = b3;
2159          Put put = new Put(k);
          put.setDurability(Durability.SKIP_WAL);
          put.addColumn(f, null, k);
2165          int preRowCount = rowCount;
2166          int pause = 10;
2167          int maxPause = 1000;
2168          while (rowCount == preRowCount) {
2169            try {
2170              r.put(put);
2171              rowCount++;
2172            } catch (RegionTooBusyException e) {
2173              pause = (pause * 2 >= maxPause) ? maxPause : pause * 2;
2174              Threads.sleep(pause);
2175            }
2176          }
2177        }
2178      }
2179      if (flush) {
2180        r.flush(true);
2181      }
2182    }
2183    return rowCount;
2184  }
2185
2186  public void loadNumericRows(final Table t, final byte[] f, int startRow, int endRow)
2187      throws IOException {
2188    for (int i = startRow; i < endRow; i++) {
2189      byte[] data = Bytes.toBytes(String.valueOf(i));
2190      Put put = new Put(data);
2191      put.addColumn(f, null, data);
2192      t.put(put);
2193    }
2194  }
2195
2196  public void loadRandomRows(final Table t, final byte[] f, int rowSize, int totalRows)
2197      throws IOException {
2198    Random r = new Random();
2199    byte[] row = new byte[rowSize];
2200    for (int i = 0; i < totalRows; i++) {
2201      r.nextBytes(row);
2202      Put put = new Put(row);
2203      put.addColumn(f, new byte[]{0}, new byte[]{0});
2204      t.put(put);
2205    }
2206  }
2207
2208  public void verifyNumericRows(Table table, final byte[] f, int startRow, int endRow,
2209      int replicaId)
2210      throws IOException {
2211    for (int i = startRow; i < endRow; i++) {
2212      String failMsg = "Failed verification of row :" + i;
2213      byte[] data = Bytes.toBytes(String.valueOf(i));
2214      Get get = new Get(data);
2215      get.setReplicaId(replicaId);
2216      get.setConsistency(Consistency.TIMELINE);
2217      Result result = table.get(get);
2218      assertTrue(failMsg, result.containsColumn(f, null));
2219      assertEquals(failMsg, 1, result.getColumnCells(f, null).size());
2220      Cell cell = result.getColumnLatestCell(f, null);
2221      assertTrue(failMsg,
2222        Bytes.equals(data, 0, data.length, cell.getValueArray(), cell.getValueOffset(),
2223          cell.getValueLength()));
2224    }
2225  }
2226
2227  public void verifyNumericRows(Region region, final byte[] f, int startRow, int endRow)
2228      throws IOException {
2229    verifyNumericRows((HRegion)region, f, startRow, endRow);
2230  }
2231
2232  public void verifyNumericRows(HRegion region, final byte[] f, int startRow, int endRow)
2233      throws IOException {
2234    verifyNumericRows(region, f, startRow, endRow, true);
2235  }
2236
2237  public void verifyNumericRows(Region region, final byte[] f, int startRow, int endRow,
2238      final boolean present) throws IOException {
2239    verifyNumericRows((HRegion)region, f, startRow, endRow, present);
2240  }
2241
2242  public void verifyNumericRows(HRegion region, final byte[] f, int startRow, int endRow,
2243      final boolean present) throws IOException {
2244    for (int i = startRow; i < endRow; i++) {
2245      String failMsg = "Failed verification of row :" + i;
2246      byte[] data = Bytes.toBytes(String.valueOf(i));
2247      Result result = region.get(new Get(data));
2248
2249      boolean hasResult = result != null && !result.isEmpty();
2250      assertEquals(failMsg + result, present, hasResult);
2251      if (!present) continue;
2252
2253      assertTrue(failMsg, result.containsColumn(f, null));
2254      assertEquals(failMsg, 1, result.getColumnCells(f, null).size());
2255      Cell cell = result.getColumnLatestCell(f, null);
2256      assertTrue(failMsg,
2257        Bytes.equals(data, 0, data.length, cell.getValueArray(), cell.getValueOffset(),
2258          cell.getValueLength()));
2259    }
2260  }
2261
2262  public void deleteNumericRows(final Table t, final byte[] f, int startRow, int endRow)
2263      throws IOException {
2264    for (int i = startRow; i < endRow; i++) {
2265      byte[] data = Bytes.toBytes(String.valueOf(i));
2266      Delete delete = new Delete(data);
2267      delete.addFamily(f);
2268      t.delete(delete);
2269    }
2270  }
2271
2272  /**
2273   * Return the number of rows in the given table.
2274   */
2275  public int countRows(final Table table) throws IOException {
2276    return countRows(table, new Scan());
2277  }
2278
2279  public int countRows(final Table table, final Scan scan) throws IOException {
2280    try (ResultScanner results = table.getScanner(scan)) {
2281      int count = 0;
2282      while (results.next() != null) {
2283        count++;
2284      }
2285      return count;
2286    }
2287  }
2288
2289  public int countRows(final Table table, final byte[]... families) throws IOException {
2290    Scan scan = new Scan();
2291    for (byte[] family: families) {
2292      scan.addFamily(family);
2293    }
2294    return countRows(table, scan);
2295  }
2296
2297  /**
2298   * Return the number of rows in the given table.
2299   */
2300  public int countRows(final TableName tableName) throws IOException {
2301    Table table = getConnection().getTable(tableName);
2302    try {
2303      return countRows(table);
2304    } finally {
2305      table.close();
2306    }
2307  }
2308
2309  public int countRows(final Region region) throws IOException {
2310    return countRows(region, new Scan());
2311  }
2312
2313  public int countRows(final Region region, final Scan scan) throws IOException {
2314    InternalScanner scanner = region.getScanner(scan);
2315    try {
2316      return countRows(scanner);
2317    } finally {
2318      scanner.close();
2319    }
2320  }
2321
2322  public int countRows(final InternalScanner scanner) throws IOException {
2323    int scannedCount = 0;
2324    List<Cell> results = new ArrayList<>();
2325    boolean hasMore = true;
2326    while (hasMore) {
2327      hasMore = scanner.next(results);
2328      scannedCount += results.size();
2329      results.clear();
2330    }
2331    return scannedCount;
2332  }
2333
2334  /**
2335   * Return an md5 digest of the entire contents of a table.
2336   */
  public String checksumRows(final Table table) throws Exception {
    Scan scan = new Scan();
    MessageDigest digest = MessageDigest.getInstance("MD5");
    try (ResultScanner results = table.getScanner(scan)) {
      for (Result res : results) {
        digest.update(res.getRow());
      }
    }
    // MessageDigest#toString does not render the hash; return the digest bytes as hex instead.
    return Bytes.toHex(digest.digest());
  }
2348
2349  /** All the row values for the data loaded by {@link #loadTable(Table, byte[])} */
2350  public static final byte[][] ROWS = new byte[(int) Math.pow('z' - 'a' + 1, 3)][3]; // ~52KB
2351  static {
2352    int i = 0;
2353    for (byte b1 = 'a'; b1 <= 'z'; b1++) {
2354      for (byte b2 = 'a'; b2 <= 'z'; b2++) {
2355        for (byte b3 = 'a'; b3 <= 'z'; b3++) {
2356          ROWS[i][0] = b1;
2357          ROWS[i][1] = b2;
2358          ROWS[i][2] = b3;
2359          i++;
2360        }
2361      }
2362    }
2363  }
2364
2365  public static final byte[][] KEYS = {
2366    HConstants.EMPTY_BYTE_ARRAY, Bytes.toBytes("bbb"),
2367    Bytes.toBytes("ccc"), Bytes.toBytes("ddd"), Bytes.toBytes("eee"),
2368    Bytes.toBytes("fff"), Bytes.toBytes("ggg"), Bytes.toBytes("hhh"),
2369    Bytes.toBytes("iii"), Bytes.toBytes("jjj"), Bytes.toBytes("kkk"),
2370    Bytes.toBytes("lll"), Bytes.toBytes("mmm"), Bytes.toBytes("nnn"),
2371    Bytes.toBytes("ooo"), Bytes.toBytes("ppp"), Bytes.toBytes("qqq"),
2372    Bytes.toBytes("rrr"), Bytes.toBytes("sss"), Bytes.toBytes("ttt"),
2373    Bytes.toBytes("uuu"), Bytes.toBytes("vvv"), Bytes.toBytes("www"),
2374    Bytes.toBytes("xxx"), Bytes.toBytes("yyy")
2375  };
2376
2377  public static final byte[][] KEYS_FOR_HBA_CREATE_TABLE = {
2378      Bytes.toBytes("bbb"),
2379      Bytes.toBytes("ccc"), Bytes.toBytes("ddd"), Bytes.toBytes("eee"),
2380      Bytes.toBytes("fff"), Bytes.toBytes("ggg"), Bytes.toBytes("hhh"),
2381      Bytes.toBytes("iii"), Bytes.toBytes("jjj"), Bytes.toBytes("kkk"),
2382      Bytes.toBytes("lll"), Bytes.toBytes("mmm"), Bytes.toBytes("nnn"),
2383      Bytes.toBytes("ooo"), Bytes.toBytes("ppp"), Bytes.toBytes("qqq"),
2384      Bytes.toBytes("rrr"), Bytes.toBytes("sss"), Bytes.toBytes("ttt"),
2385      Bytes.toBytes("uuu"), Bytes.toBytes("vvv"), Bytes.toBytes("www"),
2386      Bytes.toBytes("xxx"), Bytes.toBytes("yyy"), Bytes.toBytes("zzz")
2387  };
2388
2389  /**
2390   * Create rows in hbase:meta for regions of the specified table with the specified
2391   * start keys.  The first startKey should be a 0 length byte array if you
2392   * want to form a proper range of regions.
   * @param conf the configuration to use
   * @param htd the table descriptor
   * @param startKeys the start keys of the regions to create
2396   * @return list of region info for regions added to meta
2397   * @throws IOException
2398   * @deprecated since 2.0 version and will be removed in 3.0 version.
2399   *             use {@link #createMultiRegionsInMeta(Configuration, TableDescriptor, byte[][])}
2400   */
2401  @Deprecated
2402  public List<HRegionInfo> createMultiRegionsInMeta(final Configuration conf,
2403      final HTableDescriptor htd, byte [][] startKeys) throws IOException {
2404    return createMultiRegionsInMeta(conf, (TableDescriptor) htd, startKeys)
2405        .stream().map(ImmutableHRegionInfo::new).collect(Collectors.toList());
  }

2407  /**
2408   * Create rows in hbase:meta for regions of the specified table with the specified
2409   * start keys.  The first startKey should be a 0 length byte array if you
2410   * want to form a proper range of regions.
   * @param conf the configuration to use
   * @param htd the table descriptor
   * @param startKeys the start keys of the regions to create
2414   * @return list of region info for regions added to meta
2415   * @throws IOException
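   * <p>A minimal usage sketch; note the empty first start key (the other keys are
   * illustrative):
   * <pre>{@code
   * byte[][] startKeys = { HConstants.EMPTY_BYTE_ARRAY,
   *   Bytes.toBytes("ggg"), Bytes.toBytes("ppp") };
   * List<RegionInfo> regions = TEST_UTIL.createMultiRegionsInMeta(
   *   TEST_UTIL.getConfiguration(), desc, startKeys);
   * }</pre>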
2416   */
2417  public List<RegionInfo> createMultiRegionsInMeta(final Configuration conf,
2418      final TableDescriptor htd, byte [][] startKeys)
2419  throws IOException {
2420    Table meta = getConnection().getTable(TableName.META_TABLE_NAME);
2421    Arrays.sort(startKeys, Bytes.BYTES_COMPARATOR);
2422    List<RegionInfo> newRegions = new ArrayList<>(startKeys.length);
2423    MetaTableAccessor
2424        .updateTableState(getConnection(), htd.getTableName(), TableState.State.ENABLED);
2425    // add custom ones
2426    for (int i = 0; i < startKeys.length; i++) {
2427      int j = (i + 1) % startKeys.length;
2428      RegionInfo hri = RegionInfoBuilder.newBuilder(htd.getTableName())
2429          .setStartKey(startKeys[i])
2430          .setEndKey(startKeys[j])
2431          .build();
2432      MetaTableAccessor.addRegionToMeta(getConnection(), hri);
2433      newRegions.add(hri);
2434    }
2435
2436    meta.close();
2437    return newRegions;
2438  }
2439
2440  /**
2441   * Create an unmanaged WAL. Be sure to close it when you're through.
2442   */
2443  public static WAL createWal(final Configuration conf, final Path rootDir, final RegionInfo hri)
2444      throws IOException {
    // The WAL subsystem will use the default rootDir rather than the passed in rootDir
    // unless we pass it along via the conf.
2447    Configuration confForWAL = new Configuration(conf);
2448    confForWAL.set(HConstants.HBASE_DIR, rootDir.toString());
2449    return new WALFactory(confForWAL, "hregion-" + RandomStringUtils.randomNumeric(8)).getWAL(hri);
2450  }
2451
2452  /**
   * Create a region with its own WAL. Be sure to call
2454   * {@link HBaseTestingUtility#closeRegionAndWAL(HRegion)} to clean up all resources.
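   * <p>A minimal usage sketch (the region info and table descriptor are assumed to exist):
   * <pre>{@code
   * HRegion region = HBaseTestingUtility.createRegionAndWAL(info,
   *   TEST_UTIL.getDataTestDir(), TEST_UTIL.getConfiguration(), htd);
   * try {
   *   // exercise the region
   * } finally {
   *   HBaseTestingUtility.closeRegionAndWAL(region);
   * }
   * }</pre>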
2455   */
2456  public static HRegion createRegionAndWAL(final RegionInfo info, final Path rootDir,
2457      final Configuration conf, final TableDescriptor htd) throws IOException {
2458    return createRegionAndWAL(info, rootDir, conf, htd, true);
2459  }
2460
2461  /**
2462   * Create a region with it's own WAL. Be sure to call
2463   * {@link HBaseTestingUtility#closeRegionAndWAL(HRegion)} to clean up all resources.
2464   */
2465  public static HRegion createRegionAndWAL(final RegionInfo info, final Path rootDir,
2466      final Configuration conf, final TableDescriptor htd, boolean initialize)
2467      throws IOException {
2468    ChunkCreator.initialize(MemStoreLABImpl.CHUNK_SIZE_DEFAULT, false, 0, 0, 0, null);
2469    WAL wal = createWal(conf, rootDir, info);
2470    return HRegion.createHRegion(info, rootDir, conf, htd, wal, initialize);
2471  }
2472
2473  /**
2474   * Returns all rows from the hbase:meta table.
2475   *
2476   * @throws IOException When reading the rows fails.
2477   */
2478  public List<byte[]> getMetaTableRows() throws IOException {
2479    // TODO: Redo using MetaTableAccessor class
2480    Table t = getConnection().getTable(TableName.META_TABLE_NAME);
2481    List<byte[]> rows = new ArrayList<>();
2482    ResultScanner s = t.getScanner(new Scan());
2483    for (Result result : s) {
2484      LOG.info("getMetaTableRows: row -> " +
2485        Bytes.toStringBinary(result.getRow()));
2486      rows.add(result.getRow());
2487    }
2488    s.close();
2489    t.close();
2490    return rows;
2491  }
2492
2493  /**
2494   * Returns all rows from the hbase:meta table for a given user table
2495   *
2496   * @throws IOException When reading the rows fails.
2497   */
2498  public List<byte[]> getMetaTableRows(TableName tableName) throws IOException {
2499    // TODO: Redo using MetaTableAccessor.
2500    Table t = getConnection().getTable(TableName.META_TABLE_NAME);
2501    List<byte[]> rows = new ArrayList<>();
2502    ResultScanner s = t.getScanner(new Scan());
2503    for (Result result : s) {
2504      RegionInfo info = MetaTableAccessor.getRegionInfo(result);
2505      if (info == null) {
2506        LOG.error("No region info for row " + Bytes.toString(result.getRow()));
2507        // TODO figure out what to do for this new hosed case.
2508        continue;
2509      }
2510
2511      if (info.getTable().equals(tableName)) {
2512        LOG.info("getMetaTableRows: row -> " +
2513            Bytes.toStringBinary(result.getRow()) + info);
2514        rows.add(result.getRow());
2515      }
2516    }
2517    s.close();
2518    t.close();
2519    return rows;
2520  }
2521
2522  /**
2523   * Returns all regions of the specified table
2524   *
2525   * @param tableName the table name
2526   * @return all regions of the specified table
2527   * @throws IOException when getting the regions fails.
2528   */
2529  private List<RegionInfo> getRegions(TableName tableName) throws IOException {
2530    try (Admin admin = getConnection().getAdmin()) {
2531      return admin.getRegions(tableName);
2532    }
2533  }
2534
  /**
   * Find any other region server which is different from the one identified by the parameter.
   * @param rs the region server to exclude
   * @return another region server, or null if there is none
   */
2540  public HRegionServer getOtherRegionServer(HRegionServer rs) {
2541    for (JVMClusterUtil.RegionServerThread rst :
2542      getMiniHBaseCluster().getRegionServerThreads()) {
      if (rst.getRegionServer() != rs) {
2544        return rst.getRegionServer();
2545      }
2546    }
2547    return null;
2548  }
2549
2550  /**
2551   * Tool to get the reference to the region server object that holds the
2552   * region of the specified user table.
2553   * @param tableName user table to lookup in hbase:meta
2554   * @return region server that holds it, null if the row doesn't exist
2555   * @throws IOException
2556   * @throws InterruptedException
2557   */
2558  public HRegionServer getRSForFirstRegionInTable(TableName tableName)
2559      throws IOException, InterruptedException {
2560    List<RegionInfo> regions = getRegions(tableName);
2561    if (regions == null || regions.isEmpty()) {
2562      return null;
2563    }
2564    LOG.debug("Found " + regions.size() + " regions for table " +
2565        tableName);
2566
2567    byte[] firstRegionName = regions.stream()
2568        .filter(r -> !r.isOffline())
2569        .map(RegionInfo::getRegionName)
2570        .findFirst()
2571        .orElseThrow(() -> new IOException("online regions not found in table " + tableName));
2572
2573    LOG.debug("firstRegionName=" + Bytes.toString(firstRegionName));
2574    long pause = getConfiguration().getLong(HConstants.HBASE_CLIENT_PAUSE,
2575      HConstants.DEFAULT_HBASE_CLIENT_PAUSE);
2576    int numRetries = getConfiguration().getInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER,
2577      HConstants.DEFAULT_HBASE_CLIENT_RETRIES_NUMBER);
    // The configured client pause is in milliseconds, so the retry counter must use MILLISECONDS.
    RetryCounter retrier = new RetryCounter(numRetries + 1, (int) pause, TimeUnit.MILLISECONDS);
    while (retrier.shouldRetry()) {
2580      int index = getMiniHBaseCluster().getServerWith(firstRegionName);
2581      if (index != -1) {
2582        return getMiniHBaseCluster().getRegionServerThreads().get(index).getRegionServer();
2583      }
2584      // Came back -1.  Region may not be online yet.  Sleep a while.
2585      retrier.sleepUntilNextRetry();
2586    }
2587    return null;
2588  }
2589
2590  /**
   * Starts a <code>MiniMRCluster</code> with a default number of
   * <code>TaskTracker</code>s.
2593   *
2594   * @throws IOException When starting the cluster fails.
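   * <p>A minimal usage sketch:
   * <pre>{@code
   * MiniMRCluster mr = TEST_UTIL.startMiniMapReduceCluster();
   * try {
   *   // submit MapReduce jobs against the mini cluster here
   * } finally {
   *   TEST_UTIL.shutdownMiniMapReduceCluster();
   * }
   * }</pre>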
2595   */
2596  public MiniMRCluster startMiniMapReduceCluster() throws IOException {
2597    // Set a very high max-disk-utilization percentage to avoid the NodeManagers from failing.
2598    conf.setIfUnset(
2599        "yarn.nodemanager.disk-health-checker.max-disk-utilization-per-disk-percentage",
2600        "99.0");
2601    startMiniMapReduceCluster(2);
2602    return mrCluster;
2603  }
2604
2605  /**
2606   * Tasktracker has a bug where changing the hadoop.log.dir system property
2607   * will not change its internal static LOG_DIR variable.
2608   */
2609  private void forceChangeTaskLogDir() {
2610    Field logDirField;
2611    try {
2612      logDirField = TaskLog.class.getDeclaredField("LOG_DIR");
2613      logDirField.setAccessible(true);
2614
2615      Field modifiersField = Field.class.getDeclaredField("modifiers");
2616      modifiersField.setAccessible(true);
2617      modifiersField.setInt(logDirField, logDirField.getModifiers() & ~Modifier.FINAL);
2618
2619      logDirField.set(null, new File(hadoopLogDir, "userlogs"));
    } catch (SecurityException | NoSuchFieldException | IllegalArgumentException
        | IllegalAccessException e) {
      throw new RuntimeException(e);
    }
2630  }
2631
2632  /**
2633   * Starts a <code>MiniMRCluster</code>. Call {@link #setFileSystemURI(String)} to use a different
2634   * filesystem.
   * @param servers  The number of <code>TaskTracker</code>s to start.
2636   * @throws IOException When starting the cluster fails.
2637   */
2638  private void startMiniMapReduceCluster(final int servers) throws IOException {
2639    if (mrCluster != null) {
2640      throw new IllegalStateException("MiniMRCluster is already running");
2641    }
2642    LOG.info("Starting mini mapreduce cluster...");
2643    setupClusterTestDir();
2644    createDirsAndSetProperties();
2645
2646    forceChangeTaskLogDir();
2647
2648    //// hadoop2 specific settings
2649    // Tests were failing because this process used 6GB of virtual memory and was getting killed.
2650    // we up the VM usable so that processes don't get killed.
2651    conf.setFloat("yarn.nodemanager.vmem-pmem-ratio", 8.0f);
2652
2653    // Tests were failing due to MAPREDUCE-4880 / MAPREDUCE-4607 against hadoop 2.0.2-alpha and
2654    // this avoids the problem by disabling speculative task execution in tests.
2655    conf.setBoolean("mapreduce.map.speculative", false);
2656    conf.setBoolean("mapreduce.reduce.speculative", false);
2657    ////
2658
2659    // Allow the user to override FS URI for this map-reduce cluster to use.
2660    mrCluster = new MiniMRCluster(servers,
2661      FS_URI != null ? FS_URI : FileSystem.get(conf).getUri().toString(), 1,
2662      null, null, new JobConf(this.conf));
2663    JobConf jobConf = MapreduceTestingShim.getJobConf(mrCluster);
2664    if (jobConf == null) {
2665      jobConf = mrCluster.createJobConf();
2666    }
2667
2668    jobConf.set("mapreduce.cluster.local.dir",
2669      conf.get("mapreduce.cluster.local.dir")); //Hadoop MiniMR overwrites this while it should not
2670    LOG.info("Mini mapreduce cluster started");
2671
2672    // In hadoop2, YARN/MR2 starts a mini cluster with its own conf instance and updates settings.
2673    // Our HBase MR jobs need several of these settings in order to properly run.  So we copy the
2674    // necessary config properties here.  YARN-129 required adding a few properties.
2675    conf.set("mapreduce.jobtracker.address", jobConf.get("mapreduce.jobtracker.address"));
2676    // this for mrv2 support; mr1 ignores this
2677    conf.set("mapreduce.framework.name", "yarn");
2678    conf.setBoolean("yarn.is.minicluster", true);
2679    String rmAddress = jobConf.get("yarn.resourcemanager.address");
2680    if (rmAddress != null) {
2681      conf.set("yarn.resourcemanager.address", rmAddress);
2682    }
2683    String historyAddress = jobConf.get("mapreduce.jobhistory.address");
2684    if (historyAddress != null) {
2685      conf.set("mapreduce.jobhistory.address", historyAddress);
2686    }
2687    String schedulerAddress =
2688      jobConf.get("yarn.resourcemanager.scheduler.address");
2689    if (schedulerAddress != null) {
2690      conf.set("yarn.resourcemanager.scheduler.address", schedulerAddress);
2691    }
2692    String mrJobHistoryWebappAddress =
2693      jobConf.get("mapreduce.jobhistory.webapp.address");
2694    if (mrJobHistoryWebappAddress != null) {
2695      conf.set("mapreduce.jobhistory.webapp.address", mrJobHistoryWebappAddress);
2696    }
2697    String yarnRMWebappAddress =
2698      jobConf.get("yarn.resourcemanager.webapp.address");
2699    if (yarnRMWebappAddress != null) {
2700      conf.set("yarn.resourcemanager.webapp.address", yarnRMWebappAddress);
2701    }
2702  }
2703
2704  /**
2705   * Stops the previously started <code>MiniMRCluster</code>.
2706   */
2707  public void shutdownMiniMapReduceCluster() {
2708    if (mrCluster != null) {
2709      LOG.info("Stopping mini mapreduce cluster...");
2710      mrCluster.shutdown();
2711      mrCluster = null;
2712      LOG.info("Mini mapreduce cluster stopped");
2713    }
2714    // Restore configuration to point to local jobtracker
2715    conf.set("mapreduce.jobtracker.address", "local");
2716  }
2717
2718  /**
   * Create a stubbed out RegionServerServices, mainly for getting the FS.
2720   */
2721  public RegionServerServices createMockRegionServerService() throws IOException {
2722    return createMockRegionServerService((ServerName)null);
2723  }
2724
2725  /**
   * Create a stubbed out RegionServerServices, mainly for getting the FS.
2727   * This version is used by TestTokenAuthentication
2728   */
2729  public RegionServerServices createMockRegionServerService(RpcServerInterface rpc) throws
2730      IOException {
2731    final MockRegionServerServices rss = new MockRegionServerServices(getZooKeeperWatcher());
2732    rss.setFileSystem(getTestFileSystem());
2733    rss.setRpcServer(rpc);
2734    return rss;
2735  }
2736
2737  /**
   * Create a stubbed out RegionServerServices, mainly for getting the FS.
2739   * This version is used by TestOpenRegionHandler
2740   */
2741  public RegionServerServices createMockRegionServerService(ServerName name) throws IOException {
2742    final MockRegionServerServices rss = new MockRegionServerServices(getZooKeeperWatcher(), name);
2743    rss.setFileSystem(getTestFileSystem());
2744    return rss;
2745  }
2746
2747  /**
2748   * Switches the logger for the given class to DEBUG level.
2749   *
2750   * @param clazz  The class for which to switch to debug logging.
2751   */
2752  public void enableDebug(Class<?> clazz) {
2753    Logger l = LoggerFactory.getLogger(clazz);
2754    if (l instanceof Log4JLogger) {
2755      ((Log4JLogger) l).getLogger().setLevel(org.apache.log4j.Level.DEBUG);
2756    } else if (l instanceof Log4jLoggerAdapter) {
2757      LogManager.getLogger(clazz).setLevel(org.apache.log4j.Level.DEBUG);
2758    } else if (l instanceof Jdk14Logger) {
2759      ((Jdk14Logger) l).getLogger().setLevel(java.util.logging.Level.ALL);
2760    }
2761  }
2762
2763  /**
2764   * Expire the Master's session
2765   * @throws Exception
2766   */
2767  public void expireMasterSession() throws Exception {
2768    HMaster master = getMiniHBaseCluster().getMaster();
2769    expireSession(master.getZooKeeper(), false);
2770  }
2771
2772  /**
2773   * Expire a region server's session
2774   * @param index which RS
2775   */
2776  public void expireRegionServerSession(int index) throws Exception {
2777    HRegionServer rs = getMiniHBaseCluster().getRegionServer(index);
2778    expireSession(rs.getZooKeeper(), false);
2779    decrementMinRegionServerCount();
2780  }
2781
2782  private void decrementMinRegionServerCount() {
    // decrement the count for this.conf, for the newly spawned master
2784    // this.hbaseCluster shares this configuration too
2785    decrementMinRegionServerCount(getConfiguration());
2786
2787    // each master thread keeps a copy of configuration
2788    for (MasterThread master : getHBaseCluster().getMasterThreads()) {
2789      decrementMinRegionServerCount(master.getMaster().getConfiguration());
2790    }
2791  }
2792
2793  private void decrementMinRegionServerCount(Configuration conf) {
2794    int currentCount = conf.getInt(
2795        ServerManager.WAIT_ON_REGIONSERVERS_MINTOSTART, -1);
2796    if (currentCount != -1) {
2797      conf.setInt(ServerManager.WAIT_ON_REGIONSERVERS_MINTOSTART,
2798          Math.max(currentCount - 1, 1));
2799    }
2800  }
2801
2802  public void expireSession(ZKWatcher nodeZK) throws Exception {
    expireSession(nodeZK, false);
2804  }
2805
2806  /**
2807   * Expire a ZooKeeper session as recommended in ZooKeeper documentation
2808   * http://hbase.apache.org/book.html#trouble.zookeeper
2809   * There are issues when doing this:
2810   * [1] http://www.mail-archive.com/dev@zookeeper.apache.org/msg01942.html
2811   * [2] https://issues.apache.org/jira/browse/ZOOKEEPER-1105
2812   *
2813   * @param nodeZK - the ZK watcher to expire
2814   * @param checkStatus - true to check if we can create a Table with the
2815   *                    current configuration.
2816   */
2817  public void expireSession(ZKWatcher nodeZK, boolean checkStatus)
2818    throws Exception {
2819    Configuration c = new Configuration(this.conf);
2820    String quorumServers = ZKConfig.getZKQuorumServersString(c);
2821    ZooKeeper zk = nodeZK.getRecoverableZooKeeper().getZooKeeper();
2822    byte[] password = zk.getSessionPasswd();
2823    long sessionID = zk.getSessionId();
2824
    // Expiry seems to be asynchronous (see comment from P. Hunt in [1]),
    //  so we create a first watcher to be sure that the
    //  event was sent. We expect that if our watcher receives the event,
    //  other watchers on the same machine will get it as well.
    // When we ask to close the connection, ZK does not close it before
    //  we receive all the events, so we don't have to capture the event;
    //  just closing the connection should be enough.
2832    ZooKeeper monitor = new ZooKeeper(quorumServers,
2833      1000, new org.apache.zookeeper.Watcher(){
2834      @Override
2835      public void process(WatchedEvent watchedEvent) {
2836        LOG.info("Monitor ZKW received event="+watchedEvent);
2837      }
2838    } , sessionID, password);
2839
2840    // Making it expire
2841    ZooKeeper newZK = new ZooKeeper(quorumServers,
2842        1000, EmptyWatcher.instance, sessionID, password);
2843
    // Ensure that we have a connection to the server before closing down; otherwise
    // the close-session event may be swallowed before we even reach the CONNECTING state.
2846    long start = System.currentTimeMillis();
2847    while (newZK.getState() != States.CONNECTED
2848         && System.currentTimeMillis() - start < 1000) {
2849       Thread.sleep(1);
2850    }
2851    newZK.close();
2852    LOG.info("ZK Closed Session 0x" + Long.toHexString(sessionID));
2853
2854    // Now closing & waiting to be sure that the clients get it.
2855    monitor.close();
2856
2857    if (checkStatus) {
2858      getConnection().getTable(TableName.META_TABLE_NAME).close();
2859    }
2860  }
2861
2862  /**
2863   * Get the Mini HBase cluster.
2864   *
2865   * @return hbase cluster
2866   * @see #getHBaseClusterInterface()
2867   */
2868  public MiniHBaseCluster getHBaseCluster() {
2869    return getMiniHBaseCluster();
2870  }
2871
2872  /**
2873   * Returns the HBaseCluster instance.
2874   * <p>Returned object can be any of the subclasses of HBaseCluster, and the
2875   * tests referring this should not assume that the cluster is a mini cluster or a
2876   * distributed one. If the test only works on a mini cluster, then specific
2877   * method {@link #getMiniHBaseCluster()} can be used instead w/o the
2878   * need to type-cast.
2879   */
2880  public HBaseCluster getHBaseClusterInterface() {
2881    //implementation note: we should rename this method as #getHBaseCluster(),
2882    //but this would require refactoring 90+ calls.
2883    return hbaseCluster;
2884  }
2885
2886  /**
2887   * Get a Connection to the cluster.
2888   * Not thread-safe (This class needs a lot of work to make it thread-safe).
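   *
   * <p>For example (a sketch; the table name is hypothetical):
   * <pre>
   * try (Table table = TEST_UTIL.getConnection().getTable(TableName.valueOf("testtable"))) {
   *   table.get(new Get(Bytes.toBytes("row")));
   * }
   * </pre>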
2889   * @return A Connection that can be shared. Don't close. Will be closed on shutdown of cluster.
2890   * @throws IOException
2891   */
2892  public Connection getConnection() throws IOException {
2893    if (this.connection == null) {
2894      this.connection = ConnectionFactory.createConnection(this.conf);
2895    }
2896    return this.connection;
2897  }
2898
2899  /**
   * Returns an Admin instance.
   * This instance is shared between HBaseTestingUtility instance users. Closing it has no effect;
   * it will be closed automatically when the cluster shuts down.
2903   *
2904   * @return HBaseAdmin instance which is guaranteed to support only {@link Admin} interface.
2905   *   Functions in HBaseAdmin not provided by {@link Admin} interface can be changed/deleted
2906   *   anytime.
2907   * @deprecated Since 2.0. Will be removed in 3.0. Use {@link #getAdmin()} instead.
2908   */
2909  @Deprecated
2910  public synchronized HBaseAdmin getHBaseAdmin()
2911  throws IOException {
2912    if (hbaseAdmin == null){
2913      this.hbaseAdmin = (HBaseAdmin) getConnection().getAdmin();
2914    }
2915    return hbaseAdmin;
2916  }
2917
2918  /**
   * Returns an Admin instance which is shared between HBaseTestingUtility instance users.
   * Closing it has no effect; it will be closed automatically when the cluster shuts down.
2921   */
2922  public synchronized Admin getAdmin() throws IOException {
2923    if (hbaseAdmin == null){
2924      this.hbaseAdmin = (HBaseAdmin) getConnection().getAdmin();
2925    }
2926    return hbaseAdmin;
2927  }
2928
2929  private HBaseAdmin hbaseAdmin = null;
2930
2931  /**
   * Returns an {@link Hbck} instance. Needs to be closed when done.
2933   */
2934  public Hbck getHbck() throws IOException {
2935    return ((ClusterConnection) getConnection()).getHbck();
2936  }
2937
2938  /**
2939   * Unassign the named region.
2940   *
2941   * @param regionName  The region to unassign.
2942   */
2943  public void unassignRegion(String regionName) throws IOException {
2944    unassignRegion(Bytes.toBytes(regionName));
2945  }
2946
2947  /**
2948   * Unassign the named region.
2949   *
2950   * @param regionName  The region to unassign.
2951   */
2952  public void unassignRegion(byte[] regionName) throws IOException {
2953    getAdmin().unassign(regionName, true);
2954  }
2955
2956  /**
2957   * Closes the region containing the given row.
2958   *
2959   * @param row  The row to find the containing region.
2960   * @param table  The table to find the region.
2961   */
2962  public void unassignRegionByRow(String row, RegionLocator table) throws IOException {
2963    unassignRegionByRow(Bytes.toBytes(row), table);
2964  }
2965
2966  /**
2967   * Closes the region containing the given row.
2968   *
2969   * @param row  The row to find the containing region.
2970   * @param table  The table to find the region.
2971   * @throws IOException
2972   */
2973  public void unassignRegionByRow(byte[] row, RegionLocator table) throws IOException {
2974    HRegionLocation hrl = table.getRegionLocation(row);
2975    unassignRegion(hrl.getRegionInfo().getRegionName());
2976  }
2977
2978  /*
2979   * Retrieves a splittable region randomly from tableName
2980   *
2981   * @param tableName name of table
2982   * @param maxAttempts maximum number of attempts, unlimited for value of -1
2983   * @return the HRegion chosen, null if none was found within limit of maxAttempts
2984   */
2985  public HRegion getSplittableRegion(TableName tableName, int maxAttempts) {
2986    List<HRegion> regions = getHBaseCluster().getRegions(tableName);
2987    int regCount = regions.size();
2988    Set<Integer> attempted = new HashSet<>();
2989    int idx;
2990    int attempts = 0;
2991    do {
2992      regions = getHBaseCluster().getRegions(tableName);
2993      if (regCount != regions.size()) {
2994        // if there was region movement, clear attempted Set
2995        attempted.clear();
2996      }
2997      regCount = regions.size();
2998      // There are chances that before we get the region for the table from an RS the region may
2999      // be going for CLOSE.  This may be because online schema change is enabled
3000      if (regCount > 0) {
3001        idx = random.nextInt(regCount);
3002        // if we have just tried this region, there is no need to try again
        if (attempted.contains(idx)) {
          attempts++; // count this attempt too, else a bounded maxAttempts could loop forever
          continue;
        }
3005        try {
3006          regions.get(idx).checkSplit();
3007          return regions.get(idx);
3008        } catch (Exception ex) {
3009          LOG.warn("Caught exception", ex);
3010          attempted.add(idx);
3011        }
3012      }
3013      attempts++;
3014    } while (maxAttempts == -1 || attempts < maxAttempts);
3015    return null;
3016  }
3017
3018  public MiniDFSCluster getDFSCluster() {
3019    return dfsCluster;
3020  }
3021
3022  public void setDFSCluster(MiniDFSCluster cluster) throws IllegalStateException, IOException {
3023    setDFSCluster(cluster, true);
3024  }
3025
3026  /**
3027   * Set the MiniDFSCluster
3028   * @param cluster cluster to use
   * @param requireDown require that the cluster not be "up" (MiniDFSCluster#isClusterUp) before
   * it is set.
3031   * @throws IllegalStateException if the passed cluster is up when it is required to be down
3032   * @throws IOException if the FileSystem could not be set from the passed dfs cluster
3033   */
3034  public void setDFSCluster(MiniDFSCluster cluster, boolean requireDown)
3035      throws IllegalStateException, IOException {
3036    if (dfsCluster != null && requireDown && dfsCluster.isClusterUp()) {
3037      throw new IllegalStateException("DFSCluster is already running! Shut it down first.");
3038    }
3039    this.dfsCluster = cluster;
3040    this.setFs();
3041  }
3042
3043  public FileSystem getTestFileSystem() throws IOException {
3044    return HFileSystem.get(conf);
3045  }
3046
3047  /**
   * Wait until all regions in a table have been assigned.  Waits up to the default timeout
   * (30 seconds) before giving up.
3050   * @param table Table to wait on.
3051   * @throws InterruptedException
3052   * @throws IOException
3053   */
3054  public void waitTableAvailable(TableName table)
3055      throws InterruptedException, IOException {
3056    waitTableAvailable(table.getName(), 30000);
3057  }
3058
3059  public void waitTableAvailable(TableName table, long timeoutMillis)
3060      throws InterruptedException, IOException {
3061    waitFor(timeoutMillis, predicateTableAvailable(table));
3062  }
3063
3064  /**
3065   * Wait until all regions in a table have been assigned
3066   * @param table Table to wait on.
3067   * @param timeoutMillis Timeout.
3068   * @throws InterruptedException
3069   * @throws IOException
3070   */
3071  public void waitTableAvailable(byte[] table, long timeoutMillis)
3072  throws InterruptedException, IOException {
3073    waitFor(timeoutMillis, predicateTableAvailable(TableName.valueOf(table)));
3074  }
3075
3076  public String explainTableAvailability(TableName tableName) throws IOException {
3077    String msg = explainTableState(tableName, TableState.State.ENABLED) + ", ";
3078    if (getHBaseCluster().getMaster().isAlive()) {
3079      Map<RegionInfo, ServerName> assignments =
3080          getHBaseCluster().getMaster().getAssignmentManager().getRegionStates()
3081              .getRegionAssignments();
3082      final List<Pair<RegionInfo, ServerName>> metaLocations =
3083          MetaTableAccessor.getTableRegionsAndLocations(connection, tableName);
3084      for (Pair<RegionInfo, ServerName> metaLocation : metaLocations) {
3085        RegionInfo hri = metaLocation.getFirst();
3086        ServerName sn = metaLocation.getSecond();
3087        if (!assignments.containsKey(hri)) {
3088          msg += ", region " + hri
3089              + " not assigned, but found in meta, it expected to be on " + sn;
3090
3091        } else if (sn == null) {
3092          msg += ",  region " + hri
3093              + " assigned,  but has no server in meta";
3094        } else if (!sn.equals(assignments.get(hri))) {
3095          msg += ",  region " + hri
3096              + " assigned,  but has different servers in meta and AM ( " +
3097              sn + " <> " + assignments.get(hri);
3098        }
3099      }
3100    }
3101    return msg;
3102  }
3103
3104  public String explainTableState(final TableName table, TableState.State state)
3105      throws IOException {
3106    TableState tableState = MetaTableAccessor.getTableState(connection, table);
3107    if (tableState == null) {
3108      return "TableState in META: No table state in META for table " + table
3109          + " last state in meta (including deleted is " + findLastTableState(table) + ")";
3110    } else if (!tableState.inStates(state)) {
3111      return "TableState in META: Not " + state + " state, but " + tableState;
3112    } else {
3113      return "TableState in META: OK";
3114    }
3115  }
3116
3117  @Nullable
3118  public TableState findLastTableState(final TableName table) throws IOException {
3119    final AtomicReference<TableState> lastTableState = new AtomicReference<>(null);
3120    MetaTableAccessor.Visitor visitor = new MetaTableAccessor.Visitor() {
3121      @Override
3122      public boolean visit(Result r) throws IOException {
3123        if (!Arrays.equals(r.getRow(), table.getName()))
3124          return false;
3125        TableState state = MetaTableAccessor.getTableState(r);
3126        if (state != null)
3127          lastTableState.set(state);
3128        return true;
3129      }
3130    };
3131    MetaTableAccessor
3132        .scanMeta(connection, null, null,
3133            MetaTableAccessor.QueryType.TABLE,
3134            Integer.MAX_VALUE, visitor);
3135    return lastTableState.get();
3136  }
3137
3138  /**
   * Waits for a table to be 'enabled'.  Enabled means that the table is set as 'enabled' and
   * all of its regions have been assigned.  Will time out after the default period (30 seconds).
   * Tolerates a nonexistent table.
3142   * @param table the table to wait on.
3143   * @throws InterruptedException if interrupted while waiting
3144   * @throws IOException if an IO problem is encountered
3145   */
3146  public void waitTableEnabled(TableName table)
3147      throws InterruptedException, IOException {
3148    waitTableEnabled(table, 30000);
3149  }
3150
3151  /**
   * Waits for a table to be 'enabled'.  Enabled means that the table is set as 'enabled' and
   * all of its regions have been assigned.
3154   * @see #waitTableEnabled(TableName, long)
3155   * @param table Table to wait on.
3156   * @param timeoutMillis Time to wait on it being marked enabled.
3157   * @throws InterruptedException
3158   * @throws IOException
3159   */
3160  public void waitTableEnabled(byte[] table, long timeoutMillis)
3161  throws InterruptedException, IOException {
3162    waitTableEnabled(TableName.valueOf(table), timeoutMillis);
3163  }
3164
3165  public void waitTableEnabled(TableName table, long timeoutMillis)
3166  throws IOException {
3167    waitFor(timeoutMillis, predicateTableEnabled(table));
3168  }
3169
3170  /**
   * Waits for a table to be 'disabled'.  Disabled means that the table is set as 'disabled'.
   * Will time out after the default period (30 seconds).
3173   * @param table Table to wait on.
3174   * @throws InterruptedException
3175   * @throws IOException
3176   */
3177  public void waitTableDisabled(byte[] table)
3178          throws InterruptedException, IOException {
3179    waitTableDisabled(table, 30000);
3180  }
3181
3182  public void waitTableDisabled(TableName table, long millisTimeout)
3183          throws InterruptedException, IOException {
3184    waitFor(millisTimeout, predicateTableDisabled(table));
3185  }
3186
3187  /**
   * Waits for a table to be 'disabled'.  Disabled means that the table is set as 'disabled'.
3189   * @param table Table to wait on.
3190   * @param timeoutMillis Time to wait on it being marked disabled.
3191   * @throws InterruptedException
3192   * @throws IOException
3193   */
3194  public void waitTableDisabled(byte[] table, long timeoutMillis)
3195          throws InterruptedException, IOException {
3196    waitTableDisabled(TableName.valueOf(table), timeoutMillis);
3197  }
3198
3199  /**
3200   * Make sure that at least the specified number of region servers
3201   * are running
3202   * @param num minimum number of region servers that should be running
3203   * @return true if we started some servers
3204   * @throws IOException
3205   */
3206  public boolean ensureSomeRegionServersAvailable(final int num)
3207      throws IOException {
3208    boolean startedServer = false;
3209    MiniHBaseCluster hbaseCluster = getMiniHBaseCluster();
3210    for (int i=hbaseCluster.getLiveRegionServerThreads().size(); i<num; ++i) {
3211      LOG.info("Started new server=" + hbaseCluster.startRegionServer());
3212      startedServer = true;
3213    }
3214
3215    return startedServer;
3216  }
3217
3219  /**
3220   * Make sure that at least the specified number of region servers
3221   * are running. We don't count the ones that are currently stopping or are
3222   * stopped.
3223   * @param num minimum number of region servers that should be running
3224   * @return true if we started some servers
3225   * @throws IOException
3226   */
3227  public boolean ensureSomeNonStoppedRegionServersAvailable(final int num)
3228    throws IOException {
3229    boolean startedServer = ensureSomeRegionServersAvailable(num);
3230
3231    int nonStoppedServers = 0;
3232    for (JVMClusterUtil.RegionServerThread rst :
3233      getMiniHBaseCluster().getRegionServerThreads()) {
3234
3235      HRegionServer hrs = rst.getRegionServer();
3236      if (hrs.isStopping() || hrs.isStopped()) {
3237        LOG.info("A region server is stopped or stopping:"+hrs);
3238      } else {
3239        nonStoppedServers++;
3240      }
3241    }
3242    for (int i=nonStoppedServers; i<num; ++i) {
3243      LOG.info("Started new server=" + getMiniHBaseCluster().startRegionServer());
3244      startedServer = true;
3245    }
3246    return startedServer;
3247  }
3248
3250  /**
   * This method clones the passed <code>c</code> configuration, setting a new
   * user into the clone.  Use it when getting new instances of FileSystem.  Only
   * works for DistributedFileSystem w/o Kerberos.
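   *
   * <p>A sketch of the intended use (the suffix is arbitrary):
   * <pre>
   * User user = getDifferentUser(conf, ".hrs");
   * // Open the FileSystem as this user so each daemon gets its own instance.
   * FileSystem fs = user.runAs(
   *   (PrivilegedExceptionAction&lt;FileSystem&gt;) () -&gt; FileSystem.get(conf));
   * </pre>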
3254   * @param c Initial configuration
3255   * @param differentiatingSuffix Suffix to differentiate this user from others.
3256   * @return A new configuration instance with a different user set into it.
3257   * @throws IOException
3258   */
3259  public static User getDifferentUser(final Configuration c,
3260    final String differentiatingSuffix)
3261  throws IOException {
3262    FileSystem currentfs = FileSystem.get(c);
3263    if (!(currentfs instanceof DistributedFileSystem) || User.isHBaseSecurityEnabled(c)) {
3264      return User.getCurrent();
3265    }
3266    // Else distributed filesystem.  Make a new instance per daemon.  Below
3267    // code is taken from the AppendTestUtil over in hdfs.
3268    String username = User.getCurrent().getName() +
3269      differentiatingSuffix;
3270    User user = User.createUserForTesting(c, username,
3271        new String[]{"supergroup"});
3272    return user;
3273  }
3274
3275  public static NavigableSet<String> getAllOnlineRegions(MiniHBaseCluster cluster)
3276      throws IOException {
3277    NavigableSet<String> online = new TreeSet<>();
3278    for (RegionServerThread rst : cluster.getLiveRegionServerThreads()) {
3279      try {
3280        for (RegionInfo region :
3281            ProtobufUtil.getOnlineRegions(rst.getRegionServer().getRSRpcServices())) {
3282          online.add(region.getRegionNameAsString());
3283        }
3284      } catch (RegionServerStoppedException e) {
3285        // That's fine.
3286      }
3287    }
3288    for (MasterThread mt : cluster.getLiveMasterThreads()) {
3289      try {
3290        for (RegionInfo region :
3291            ProtobufUtil.getOnlineRegions(mt.getMaster().getRSRpcServices())) {
3292          online.add(region.getRegionNameAsString());
3293        }
3294      } catch (RegionServerStoppedException e) {
3295        // That's fine.
3296      } catch (ServerNotRunningYetException e) {
3297        // That's fine.
3298      }
3299    }
3300    return online;
3301  }
3302
3303  /**
   * Set maxRecoveryErrorCount in DFSClient.  In 0.20 pre-append, it's hard-coded to 5 and
3305   * makes tests linger.  Here is the exception you'll see:
3306   * <pre>
3307   * 2010-06-15 11:52:28,511 WARN  [DataStreamer for file /hbase/.logs/wal.1276627923013 block
3308   * blk_928005470262850423_1021] hdfs.DFSClient$DFSOutputStream(2657): Error Recovery for block
3309   * blk_928005470262850423_1021 failed  because recovery from primary datanode 127.0.0.1:53683
3310   * failed 4 times.  Pipeline was 127.0.0.1:53687, 127.0.0.1:53683. Will retry...
3311   * </pre>
3312   * @param stream A DFSClient.DFSOutputStream.
   * @param max the maximum recovery error count to set
3314   * @throws NoSuchFieldException
3315   * @throws SecurityException
3316   * @throws IllegalAccessException
3317   * @throws IllegalArgumentException
3318   */
3319  public static void setMaxRecoveryErrorCount(final OutputStream stream,
3320      final int max) {
3321    try {
3322      Class<?> [] clazzes = DFSClient.class.getDeclaredClasses();
3323      for (Class<?> clazz: clazzes) {
3324        String className = clazz.getSimpleName();
3325        if (className.equals("DFSOutputStream")) {
3326          if (clazz.isInstance(stream)) {
3327            Field maxRecoveryErrorCountField =
3328              stream.getClass().getDeclaredField("maxRecoveryErrorCount");
3329            maxRecoveryErrorCountField.setAccessible(true);
3330            maxRecoveryErrorCountField.setInt(stream, max);
3331            break;
3332          }
3333        }
3334      }
3335    } catch (Exception e) {
3336      LOG.info("Could not set max recovery field", e);
3337    }
3338  }
3339
3340  /**
   * Uses the assignment manager directly to assign the region, and waits until the
   * specified region has completed assignment.
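   *
   * <p>Example sketch ({@code tableName} is hypothetical):
   * <pre>
   * RegionInfo hri = TEST_UTIL.getAdmin().getRegions(tableName).get(0);
   * assertTrue(TEST_UTIL.assignRegion(hri));
   * </pre>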
   * @return true if the region is assigned, false otherwise.
3344   */
3345  public boolean assignRegion(final RegionInfo regionInfo)
3346      throws IOException, InterruptedException {
3347    final AssignmentManager am = getHBaseCluster().getMaster().getAssignmentManager();
3348    am.assign(regionInfo);
3349    return AssignmentTestingUtil.waitForAssignment(am, regionInfo);
3350  }
3351
3352  /**
   * Move the region to the destination server and wait until the region is completely
   * moved and online.
3354   *
3355   * @param destRegion region to move
3356   * @param destServer destination server of the region
3357   * @throws InterruptedException
3358   * @throws IOException
3359   */
3360  public void moveRegionAndWait(RegionInfo destRegion, ServerName destServer)
3361      throws InterruptedException, IOException {
3362    HMaster master = getMiniHBaseCluster().getMaster();
3363    // TODO: Here we start the move. The move can take a while.
3364    getAdmin().move(destRegion.getEncodedNameAsBytes(),
3365        Bytes.toBytes(destServer.getServerName()));
3366    while (true) {
3367      ServerName serverName = master.getAssignmentManager().getRegionStates()
3368          .getRegionServerOfRegion(destRegion);
3369      if (serverName != null && serverName.equals(destServer)) {
3370        assertRegionOnServer(destRegion, serverName, 2000);
3371        break;
3372      }
3373      Thread.sleep(10);
3374    }
3375  }
3376
3377  /**
   * Wait until all regions for a table in hbase:meta have a non-empty info:server,
   * up to a configurable timeout value (default is 60 seconds).
   * This means all regions have been deployed, and the master has been informed and has
   * updated hbase:meta with the regions' deployed server.
3383   * @param tableName the table name
3384   * @throws IOException
3385   */
3386  public void waitUntilAllRegionsAssigned(final TableName tableName) throws IOException {
3387    waitUntilAllRegionsAssigned(tableName,
3388      this.conf.getLong("hbase.client.sync.wait.timeout.msec", 60000));
3389  }
3390
3391  /**
   * Wait until all system tables' regions get assigned.
3393   * @throws IOException
3394   */
3395  public void waitUntilAllSystemRegionsAssigned() throws IOException {
3396    waitUntilAllRegionsAssigned(TableName.META_TABLE_NAME);
3397    waitUntilAllRegionsAssigned(TableName.NAMESPACE_TABLE_NAME);
3398  }
3399
3400  /**
   * Wait until all regions for a table in hbase:meta have a non-empty
   * info:server, or until timeout.  This means all regions have been deployed, and the
   * master has been informed and has updated hbase:meta with the regions' deployed
   * server.
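   *
   * <p>Typical use right after creating a table, as a sketch (the table name is hypothetical):
   * <pre>
   * TableName tn = TableName.valueOf("testtable");
   * TEST_UTIL.createTable(tn, Bytes.toBytes("cf"));
   * TEST_UTIL.waitUntilAllRegionsAssigned(tn, 60000);
   * </pre>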
3405   * @param tableName the table name
3406   * @param timeout timeout, in milliseconds
3407   * @throws IOException
3408   */
3409  public void waitUntilAllRegionsAssigned(final TableName tableName, final long timeout)
3410      throws IOException {
3411    if (!TableName.isMetaTableName(tableName)) {
3412      try (final Table meta = getConnection().getTable(TableName.META_TABLE_NAME)) {
3413        LOG.debug("Waiting until all regions of table " + tableName + " get assigned. Timeout = " +
3414            timeout + "ms");
3415        waitFor(timeout, 200, true, new ExplainingPredicate<IOException>() {
3416          @Override
3417          public String explainFailure() throws IOException {
3418            return explainTableAvailability(tableName);
3419          }
3420
3421          @Override
3422          public boolean evaluate() throws IOException {
3423            Scan scan = new Scan();
3424            scan.addFamily(HConstants.CATALOG_FAMILY);
3425            boolean tableFound = false;
3426            try (ResultScanner s = meta.getScanner(scan)) {
3427              for (Result r; (r = s.next()) != null;) {
3428                byte[] b = r.getValue(HConstants.CATALOG_FAMILY, HConstants.REGIONINFO_QUALIFIER);
3429                HRegionInfo info = HRegionInfo.parseFromOrNull(b);
3430                if (info != null && info.getTable().equals(tableName)) {
3431                  // Get server hosting this region from catalog family. Return false if no server
3432                  // hosting this region, or if the server hosting this region was recently killed
3433                  // (for fault tolerance testing).
3434                  tableFound = true;
3435                  byte[] server =
3436                      r.getValue(HConstants.CATALOG_FAMILY, HConstants.SERVER_QUALIFIER);
3437                  if (server == null) {
3438                    return false;
3439                  } else {
3440                    byte[] startCode =
3441                        r.getValue(HConstants.CATALOG_FAMILY, HConstants.STARTCODE_QUALIFIER);
3442                    ServerName serverName =
3443                        ServerName.valueOf(Bytes.toString(server).replaceFirst(":", ",") + "," +
3444                            Bytes.toLong(startCode));
3445                    if (!getHBaseClusterInterface().isDistributedCluster() &&
3446                        getHBaseCluster().isKilledRS(serverName)) {
3447                      return false;
3448                    }
3449                  }
3450                  if (RegionStateStore.getRegionState(r,
3451                    info.getReplicaId()) != RegionState.State.OPEN) {
3452                    return false;
3453                  }
3454                }
3455              }
3456            }
3457            if (!tableFound) {
3458              LOG.warn("Didn't find the entries for table " + tableName + " in meta, already deleted?");
3459            }
3460            return tableFound;
3461          }
3462        });
3463      }
3464    }
3465    LOG.info("All regions for table " + tableName + " assigned to meta. Checking AM states.");
3466    // check from the master state if we are using a mini cluster
3467    if (!getHBaseClusterInterface().isDistributedCluster()) {
3468      // So, all regions are in the meta table but make sure master knows of the assignments before
3469      // returning -- sometimes this can lag.
3470      HMaster master = getHBaseCluster().getMaster();
3471      final RegionStates states = master.getAssignmentManager().getRegionStates();
3472      waitFor(timeout, 200, new ExplainingPredicate<IOException>() {
3473        @Override
3474        public String explainFailure() throws IOException {
3475          return explainTableAvailability(tableName);
3476        }
3477
3478        @Override
3479        public boolean evaluate() throws IOException {
3480          List<RegionInfo> hris = states.getRegionsOfTable(tableName);
3481          return hris != null && !hris.isEmpty();
3482        }
3483      });
3484    }
3485    LOG.info("All regions for table " + tableName + " assigned.");
3486  }
3487
3488  /**
   * Do a small get/scan against one store. This is required because the store
   * has no actual methods of querying itself, and relies on StoreScanner.
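   *
   * <p>A usage sketch (the region, family, and row are hypothetical):
   * <pre>
   * HStore store = region.getStore(Bytes.toBytes("cf"));
   * List&lt;Cell&gt; cells = getFromStoreFile(store, new Get(Bytes.toBytes("row")));
   * </pre>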
3491   */
3492  public static List<Cell> getFromStoreFile(HStore store,
3493                                                Get get) throws IOException {
3494    Scan scan = new Scan(get);
3495    InternalScanner scanner = (InternalScanner) store.getScanner(scan,
3496        scan.getFamilyMap().get(store.getColumnFamilyDescriptor().getName()),
3497        // originally MultiVersionConcurrencyControl.resetThreadReadPoint() was called to set
3498        // readpoint 0.
3499        0);
3500
3501    List<Cell> result = new ArrayList<>();
3502    scanner.next(result);
3503    if (!result.isEmpty()) {
3504      // verify that we are on the row we want:
3505      Cell kv = result.get(0);
3506      if (!CellUtil.matchingRows(kv, get.getRow())) {
3507        result.clear();
3508      }
3509    }
3510    scanner.close();
3511    return result;
3512  }
3513
3514  /**
   * Create region split keys between startKey and endKey.
3516   *
   * @param startKey the start key
   * @param endKey the end key
   * @param numRegions the number of regions to be created. It has to be greater than 3.
3520   * @return resulting split keys
3521   */
  public byte[][] getRegionSplitStartKeys(byte[] startKey, byte[] endKey, int numRegions) {
    assertTrue(numRegions > 3);
3524    byte [][] tmpSplitKeys = Bytes.split(startKey, endKey, numRegions - 3);
3525    byte [][] result = new byte[tmpSplitKeys.length+1][];
3526    System.arraycopy(tmpSplitKeys, 0, result, 1, tmpSplitKeys.length);
3527    result[0] = HConstants.EMPTY_BYTE_ARRAY;
3528    return result;
3529  }
3530
3531  /**
   * Do a small get/scan against one store. This is required because the store
   * has no actual methods of querying itself, and relies on StoreScanner.
3534   */
3535  public static List<Cell> getFromStoreFile(HStore store,
3536                                                byte [] row,
3537                                                NavigableSet<byte[]> columns
3538                                                ) throws IOException {
3539    Get get = new Get(row);
3540    Map<byte[], NavigableSet<byte[]>> s = get.getFamilyMap();
3541    s.put(store.getColumnFamilyDescriptor().getName(), columns);
3542
    return getFromStoreFile(store, get);
3544  }
3545
3546  public static void assertKVListsEqual(String additionalMsg,
3547      final List<? extends Cell> expected,
3548      final List<? extends Cell> actual) {
3549    final int eLen = expected.size();
3550    final int aLen = actual.size();
3551    final int minLen = Math.min(eLen, aLen);
3552
3553    int i;
3554    for (i = 0; i < minLen
3555        && CellComparator.getInstance().compare(expected.get(i), actual.get(i)) == 0;
3556        ++i) {}
3557
3558    if (additionalMsg == null) {
3559      additionalMsg = "";
3560    }
3561    if (!additionalMsg.isEmpty()) {
3562      additionalMsg = ". " + additionalMsg;
3563    }
3564
3565    if (eLen != aLen || i != minLen) {
3566      throw new AssertionError(
3567          "Expected and actual KV arrays differ at position " + i + ": " +
3568          safeGetAsStr(expected, i) + " (length " + eLen +") vs. " +
3569          safeGetAsStr(actual, i) + " (length " + aLen + ")" + additionalMsg);
3570    }
3571  }
3572
3573  public static <T> String safeGetAsStr(List<T> lst, int i) {
3574    if (0 <= i && i < lst.size()) {
3575      return lst.get(i).toString();
3576    } else {
3577      return "<out_of_range>";
3578    }
3579  }
3580
3581  public String getClusterKey() {
3582    return conf.get(HConstants.ZOOKEEPER_QUORUM) + ":"
3583        + conf.get(HConstants.ZOOKEEPER_CLIENT_PORT) + ":"
3584        + conf.get(HConstants.ZOOKEEPER_ZNODE_PARENT,
3585            HConstants.DEFAULT_ZOOKEEPER_ZNODE_PARENT);
3586  }
3587
3588  /** Creates a random table with the given parameters */
3589  public Table createRandomTable(TableName tableName,
3590      final Collection<String> families,
3591      final int maxVersions,
3592      final int numColsPerRow,
3593      final int numFlushes,
3594      final int numRegions,
3595      final int numRowsPerFlush)
3596      throws IOException, InterruptedException {
3597
3598    LOG.info("\n\nCreating random table " + tableName + " with " + numRegions +
3599        " regions, " + numFlushes + " storefiles per region, " +
3600        numRowsPerFlush + " rows per flush, maxVersions=" +  maxVersions +
3601        "\n");
3602
3603    final Random rand = new Random(tableName.hashCode() * 17L + 12938197137L);
3604    final int numCF = families.size();
3605    final byte[][] cfBytes = new byte[numCF][];
3606    {
3607      int cfIndex = 0;
3608      for (String cf : families) {
3609        cfBytes[cfIndex++] = Bytes.toBytes(cf);
3610      }
3611    }
3612
3613    final int actualStartKey = 0;
3614    final int actualEndKey = Integer.MAX_VALUE;
3615    final int keysPerRegion = (actualEndKey - actualStartKey) / numRegions;
3616    final int splitStartKey = actualStartKey + keysPerRegion;
3617    final int splitEndKey = actualEndKey - keysPerRegion;
3618    final String keyFormat = "%08x";
3619    final Table table = createTable(tableName, cfBytes,
3620        maxVersions,
3621        Bytes.toBytes(String.format(keyFormat, splitStartKey)),
3622        Bytes.toBytes(String.format(keyFormat, splitEndKey)),
3623        numRegions);
3624
3625    if (hbaseCluster != null) {
3626      getMiniHBaseCluster().flushcache(TableName.META_TABLE_NAME);
3627    }
3628
3629    BufferedMutator mutator = getConnection().getBufferedMutator(tableName);
3630
3631    for (int iFlush = 0; iFlush < numFlushes; ++iFlush) {
3632      for (int iRow = 0; iRow < numRowsPerFlush; ++iRow) {
3633        final byte[] row = Bytes.toBytes(String.format(keyFormat,
3634            actualStartKey + rand.nextInt(actualEndKey - actualStartKey)));
3635
3636        Put put = new Put(row);
3637        Delete del = new Delete(row);
3638        for (int iCol = 0; iCol < numColsPerRow; ++iCol) {
3639          final byte[] cf = cfBytes[rand.nextInt(numCF)];
3640          final long ts = rand.nextInt();
3641          final byte[] qual = Bytes.toBytes("col" + iCol);
3642          if (rand.nextBoolean()) {
3643            final byte[] value = Bytes.toBytes("value_for_row_" + iRow +
3644                "_cf_" + Bytes.toStringBinary(cf) + "_col_" + iCol + "_ts_" +
3645                ts + "_random_" + rand.nextLong());
3646            put.addColumn(cf, qual, ts, value);
3647          } else if (rand.nextDouble() < 0.8) {
3648            del.addColumn(cf, qual, ts);
3649          } else {
3650            del.addColumns(cf, qual, ts);
3651          }
3652        }
3653
3654        if (!put.isEmpty()) {
3655          mutator.mutate(put);
3656        }
3657
3658        if (!del.isEmpty()) {
3659          mutator.mutate(del);
3660        }
3661      }
3662      LOG.info("Initiating flush #" + iFlush + " for table " + tableName);
3663      mutator.flush();
3664      if (hbaseCluster != null) {
3665        getMiniHBaseCluster().flushcache(table.getName());
3666      }
3667    }
3668    mutator.close();
3669
3670    return table;
3671  }
3672
3673  private static Random random = new Random();
3674
3675  private static final PortAllocator portAllocator = new PortAllocator(random);
3676
3677  public static int randomFreePort() {
3678    return portAllocator.randomFreePort();
3679  }
3680
3681  static class PortAllocator {
3682    private static final int MIN_RANDOM_PORT = 0xc000;
3683    private static final int MAX_RANDOM_PORT = 0xfffe;
3684
3685    /** A set of ports that have been claimed using {@link #randomFreePort()}. */
3686    private final Set<Integer> takenRandomPorts = new HashSet<>();
3687
3688    private final Random random;
3689    private final AvailablePortChecker portChecker;
3690
3691    public PortAllocator(Random random) {
3692      this.random = random;
3693      this.portChecker = new AvailablePortChecker() {
3694        @Override
3695        public boolean available(int port) {
3696          try {
3697            ServerSocket sock = new ServerSocket(port);
3698            sock.close();
3699            return true;
3700          } catch (IOException ex) {
3701            return false;
3702          }
3703        }
3704      };
3705    }
3706
3707    public PortAllocator(Random random, AvailablePortChecker portChecker) {
3708      this.random = random;
3709      this.portChecker = portChecker;
3710    }
3711
3712    /**
     * Returns a random free port and marks that port as taken. Not thread-safe. Expected to be
     * called from single-threaded test setup code.
3715     */
3716    public int randomFreePort() {
3717      int port = 0;
3718      do {
3719        port = randomPort();
3720        if (takenRandomPorts.contains(port)) {
3721          port = 0;
3722          continue;
3723        }
3724        takenRandomPorts.add(port);
3725
3726        if (!portChecker.available(port)) {
3727          port = 0;
3728        }
3729      } while (port == 0);
3730      return port;
3731    }
3732
3733    /**
3734     * Returns a random port. These ports cannot be registered with IANA and are
3735     * intended for dynamic allocation (see http://bit.ly/dynports).
3736     */
3737    private int randomPort() {
3738      return MIN_RANDOM_PORT
3739          + random.nextInt(MAX_RANDOM_PORT - MIN_RANDOM_PORT);
3740    }
3741
3742    interface AvailablePortChecker {
3743      boolean available(int port);
3744    }
3745  }
3746
3747  public static String randomMultiCastAddress() {
3748    return "226.1.1." + random.nextInt(254);
3749  }
3750
3751  public static void waitForHostPort(String host, int port)
3752      throws IOException {
3753    final int maxTimeMs = 10000;
3754    final int maxNumAttempts = maxTimeMs / HConstants.SOCKET_RETRY_WAIT_MS;
3755    IOException savedException = null;
3756    LOG.info("Waiting for server at " + host + ":" + port);
3757    for (int attempt = 0; attempt < maxNumAttempts; ++attempt) {
3758      try {
3759        Socket sock = new Socket(InetAddress.getByName(host), port);
3760        sock.close();
3761        savedException = null;
3762        LOG.info("Server at " + host + ":" + port + " is available");
3763        break;
3764      } catch (UnknownHostException e) {
3765        throw new IOException("Failed to look up " + host, e);
3766      } catch (IOException e) {
3767        savedException = e;
3768      }
3769      Threads.sleepWithoutInterrupt(HConstants.SOCKET_RETRY_WAIT_MS);
3770    }
3771
3772    if (savedException != null) {
3773      throw savedException;
3774    }
3775  }
3776
3777  /**
3778   * Creates a pre-split table for load testing. If the table already exists,
3779   * logs a warning and continues.
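   *
   * <p>Example sketch (the table name is hypothetical):
   * <pre>
   * int regions = createPreSplitLoadTestTable(conf, TableName.valueOf("loadtest"),
   *   Bytes.toBytes("cf"), Compression.Algorithm.NONE, DataBlockEncoding.NONE);
   * </pre>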
3780   * @return the number of regions the table was split into
3781   */
3782  public static int createPreSplitLoadTestTable(Configuration conf,
3783      TableName tableName, byte[] columnFamily, Algorithm compression,
3784      DataBlockEncoding dataBlockEncoding) throws IOException {
3785    return createPreSplitLoadTestTable(conf, tableName,
3786      columnFamily, compression, dataBlockEncoding, DEFAULT_REGIONS_PER_SERVER, 1,
3787      Durability.USE_DEFAULT);
3788  }
3789  /**
3790   * Creates a pre-split table for load testing. If the table already exists,
3791   * logs a warning and continues.
3792   * @return the number of regions the table was split into
3793   */
3794  public static int createPreSplitLoadTestTable(Configuration conf,
3795      TableName tableName, byte[] columnFamily, Algorithm compression,
3796      DataBlockEncoding dataBlockEncoding, int numRegionsPerServer, int regionReplication,
3797      Durability durability)
3798          throws IOException {
3799    HTableDescriptor desc = new HTableDescriptor(tableName);
3800    desc.setDurability(durability);
3801    desc.setRegionReplication(regionReplication);
3802    HColumnDescriptor hcd = new HColumnDescriptor(columnFamily);
3803    hcd.setDataBlockEncoding(dataBlockEncoding);
3804    hcd.setCompressionType(compression);
3805    return createPreSplitLoadTestTable(conf, desc, hcd, numRegionsPerServer);
3806  }
3807
3808  /**
3809   * Creates a pre-split table for load testing. If the table already exists,
3810   * logs a warning and continues.
3811   * @return the number of regions the table was split into
3812   */
3813  public static int createPreSplitLoadTestTable(Configuration conf,
3814      TableName tableName, byte[][] columnFamilies, Algorithm compression,
3815      DataBlockEncoding dataBlockEncoding, int numRegionsPerServer, int regionReplication,
3816      Durability durability)
3817          throws IOException {
3818    HTableDescriptor desc = new HTableDescriptor(tableName);
3819    desc.setDurability(durability);
3820    desc.setRegionReplication(regionReplication);
3821    HColumnDescriptor[] hcds = new HColumnDescriptor[columnFamilies.length];
3822    for (int i = 0; i < columnFamilies.length; i++) {
3823      HColumnDescriptor hcd = new HColumnDescriptor(columnFamilies[i]);
3824      hcd.setDataBlockEncoding(dataBlockEncoding);
3825      hcd.setCompressionType(compression);
3826      hcds[i] = hcd;
3827    }
3828    return createPreSplitLoadTestTable(conf, desc, hcds, numRegionsPerServer);
3829  }
3830
3831  /**
3832   * Creates a pre-split table for load testing. If the table already exists,
3833   * logs a warning and continues.
3834   * @return the number of regions the table was split into
3835   */
3836  public static int createPreSplitLoadTestTable(Configuration conf,
3837      TableDescriptor desc, ColumnFamilyDescriptor hcd) throws IOException {
3838    return createPreSplitLoadTestTable(conf, desc, hcd, DEFAULT_REGIONS_PER_SERVER);
3839  }
3840
3841  /**
3842   * Creates a pre-split table for load testing. If the table already exists,
3843   * logs a warning and continues.
3844   * @return the number of regions the table was split into
3845   */
3846  public static int createPreSplitLoadTestTable(Configuration conf,
3847      TableDescriptor desc, ColumnFamilyDescriptor hcd, int numRegionsPerServer) throws IOException {
3848    return createPreSplitLoadTestTable(conf, desc, new ColumnFamilyDescriptor[] {hcd},
3849        numRegionsPerServer);
3850  }
3851
3852  /**
3853   * Creates a pre-split table for load testing. If the table already exists,
3854   * logs a warning and continues.
3855   * @return the number of regions the table was split into
3856   */
3857  public static int createPreSplitLoadTestTable(Configuration conf,
3858      TableDescriptor desc, ColumnFamilyDescriptor[] hcds,
3859      int numRegionsPerServer) throws IOException {
3860    return createPreSplitLoadTestTable(conf, desc, hcds,
3861      new RegionSplitter.HexStringSplit(), numRegionsPerServer);
3862  }
3863
3864  /**
3865   * Creates a pre-split table for load testing. If the table already exists,
3866   * logs a warning and continues.
3867   * @return the number of regions the table was split into
3868   */
3869  public static int createPreSplitLoadTestTable(Configuration conf,
3870      TableDescriptor td, ColumnFamilyDescriptor[] cds,
3871      SplitAlgorithm splitter, int numRegionsPerServer) throws IOException {
3872    TableDescriptorBuilder builder = TableDescriptorBuilder.newBuilder(td);
3873    for (ColumnFamilyDescriptor cd : cds) {
3874      if (!td.hasColumnFamily(cd.getName())) {
3875        builder.setColumnFamily(cd);
3876      }
3877    }
3878    td = builder.build();
3879    int totalNumberOfRegions = 0;
3880    Connection unmanagedConnection = ConnectionFactory.createConnection(conf);
3881    Admin admin = unmanagedConnection.getAdmin();
3882
3883    try {
      // Create a table with pre-split regions.
      // The number of splits is set as:
      //    (region servers * regions per region server).
3887      int numberOfServers =
3888          admin.getClusterMetrics(EnumSet.of(Option.LIVE_SERVERS)).getLiveServerMetrics()
3889              .size();
3890      if (numberOfServers == 0) {
3891        throw new IllegalStateException("No live regionservers");
3892      }
3893
3894      totalNumberOfRegions = numberOfServers * numRegionsPerServer;
3895      LOG.info("Number of live regionservers: " + numberOfServers + ", " +
3896          "pre-splitting table into " + totalNumberOfRegions + " regions " +
3897          "(regions per server: " + numRegionsPerServer + ")");
3898
3899      byte[][] splits = splitter.split(
3900          totalNumberOfRegions);
3901
3902      admin.createTable(td, splits);
3903    } catch (MasterNotRunningException e) {
3904      LOG.error("Master not running", e);
3905      throw new IOException(e);
3906    } catch (TableExistsException e) {
3907      LOG.warn("Table " + td.getTableName() +
3908          " already exists, continuing");
3909    } finally {
3910      admin.close();
3911      unmanagedConnection.close();
3912    }
3913    return totalNumberOfRegions;
3914  }
3915
3916  public static int getMetaRSPort(Connection connection) throws IOException {
3917    try (RegionLocator locator = connection.getRegionLocator(TableName.META_TABLE_NAME)) {
3918      return locator.getRegionLocation(Bytes.toBytes("")).getPort();
3919    }
3920  }
3921
3922  /**
   *  Due to an async racing issue, a region may not yet be in
   *  the online region list of a region server after
   *  the assignment znode is deleted and the new assignment
   *  is recorded in the master.
3927   */
3928  public void assertRegionOnServer(
3929      final RegionInfo hri, final ServerName server,
3930      final long timeout) throws IOException, InterruptedException {
3931    long timeoutTime = System.currentTimeMillis() + timeout;
3932    while (true) {
3933      List<RegionInfo> regions = getAdmin().getRegions(server);
3934      if (regions.stream().anyMatch(r -> RegionInfo.COMPARATOR.compare(r, hri) == 0)) return;
3935      long now = System.currentTimeMillis();
3936      if (now > timeoutTime) break;
3937      Thread.sleep(10);
3938    }
3939    fail("Could not find region " + hri.getRegionNameAsString()
3940      + " on server " + server);
3941  }
3942
3943  /**
3944   * Check to make sure the region is open on the specified
3945   * region server, but not on any other one.
3946   */
3947  public void assertRegionOnlyOnServer(
3948      final RegionInfo hri, final ServerName server,
3949      final long timeout) throws IOException, InterruptedException {
3950    long timeoutTime = System.currentTimeMillis() + timeout;
3951    while (true) {
3952      List<RegionInfo> regions = getAdmin().getRegions(server);
3953      if (regions.stream().anyMatch(r -> RegionInfo.COMPARATOR.compare(r, hri) == 0)) {
3954        List<JVMClusterUtil.RegionServerThread> rsThreads =
3955          getHBaseCluster().getLiveRegionServerThreads();
3956        for (JVMClusterUtil.RegionServerThread rsThread: rsThreads) {
3957          HRegionServer rs = rsThread.getRegionServer();
3958          if (server.equals(rs.getServerName())) {
3959            continue;
3960          }
3961          Collection<HRegion> hrs = rs.getOnlineRegionsLocalContext();
3962          for (HRegion r: hrs) {
3963            assertTrue("Region should not be double assigned",
3964              r.getRegionInfo().getRegionId() != hri.getRegionId());
3965          }
3966        }
3967        return; // good, we are happy
3968      }
3969      long now = System.currentTimeMillis();
3970      if (now > timeoutTime) break;
3971      Thread.sleep(10);
3972    }
3973    fail("Could not find region " + hri.getRegionNameAsString()
3974      + " on server " + server);
3975  }
3976
3977  public HRegion createTestRegion(String tableName, ColumnFamilyDescriptor cd)
3978      throws IOException {
3979    TableDescriptor td
3980        = TableDescriptorBuilder.newBuilder(TableName.valueOf(tableName))
3981            .setColumnFamily(cd)
3982            .build();
3983    HRegionInfo info =
3984        new HRegionInfo(TableName.valueOf(tableName), null, null, false);
3985    return createRegionAndWAL(info, getDataTestDir(), getConfiguration(), td);
3986  }
3987
3988  public void setFileSystemURI(String fsURI) {
3989    FS_URI = fsURI;
3990  }
3991
3992  /**
3993   * Returns a {@link Predicate} for checking that there are no regions in transition in master
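   *
   * <p>For example (a sketch):
   * <pre>
   * TEST_UTIL.waitFor(60000, TEST_UTIL.predicateNoRegionsInTransition());
   * </pre>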
3994   */
3995  public ExplainingPredicate<IOException> predicateNoRegionsInTransition() {
3996    return new ExplainingPredicate<IOException>() {
3997      @Override
3998      public String explainFailure() throws IOException {
3999        final RegionStates regionStates = getMiniHBaseCluster().getMaster()
4000            .getAssignmentManager().getRegionStates();
4001        return "found in transition: " + regionStates.getRegionsInTransition().toString();
4002      }
4003
4004      @Override
4005      public boolean evaluate() throws IOException {
4006        HMaster master = getMiniHBaseCluster().getMaster();
4007        if (master == null) return false;
4008        AssignmentManager am = master.getAssignmentManager();
4009        if (am == null) return false;
4010        return !am.hasRegionsInTransition();
4011      }
4012    };
4013  }
4014
4015  /**
   * Returns a {@link Predicate} for checking that the table is enabled
4017   */
4018  public Waiter.Predicate<IOException> predicateTableEnabled(final TableName tableName) {
4019    return new ExplainingPredicate<IOException>() {
4020      @Override
4021      public String explainFailure() throws IOException {
4022        return explainTableState(tableName, TableState.State.ENABLED);
4023      }
4024
4025      @Override
4026      public boolean evaluate() throws IOException {
4027        return getAdmin().tableExists(tableName) && getAdmin().isTableEnabled(tableName);
4028      }
4029    };
4030  }
4031
4032  /**
   * Returns a {@link Predicate} for checking that the table is disabled
4034   */
4035  public Waiter.Predicate<IOException> predicateTableDisabled(final TableName tableName) {
4036    return new ExplainingPredicate<IOException>() {
4037      @Override
4038      public String explainFailure() throws IOException {
4039        return explainTableState(tableName, TableState.State.DISABLED);
4040      }
4041
4042      @Override
4043      public boolean evaluate() throws IOException {
4044        return getAdmin().isTableDisabled(tableName);
4045      }
4046    };
4047  }
4048
4049  /**
   * Returns a {@link Predicate} for checking that the table is available
4051   */
4052  public Waiter.Predicate<IOException> predicateTableAvailable(final TableName tableName) {
4053    return new ExplainingPredicate<IOException>() {
4054      @Override
4055      public String explainFailure() throws IOException {
4056        return explainTableAvailability(tableName);
4057      }
4058
4059      @Override
4060      public boolean evaluate() throws IOException {
4061        boolean tableAvailable = getAdmin().isTableAvailable(tableName);
4062        if (tableAvailable) {
4063          try (Table table = getConnection().getTable(tableName)) {
4064            TableDescriptor htd = table.getDescriptor();
4065            for (HRegionLocation loc : getConnection().getRegionLocator(tableName)
4066                .getAllRegionLocations()) {
4067              Scan scan = new Scan().withStartRow(loc.getRegionInfo().getStartKey())
4068                  .withStopRow(loc.getRegionInfo().getEndKey()).setOneRowLimit()
4069                  .setMaxResultsPerColumnFamily(1).setCacheBlocks(false);
4070              for (byte[] family : htd.getColumnFamilyNames()) {
4071                scan.addFamily(family);
4072              }
4073              try (ResultScanner scanner = table.getScanner(scan)) {
4074                scanner.next();
4075              }
4076            }
4077          }
4078        }
4079        return tableAvailable;
4080      }
4081    };
4082  }
4083
4084  /**
4085   * Wait until no regions in transition.
4086   * @param timeout How long to wait.
4087   * @throws IOException
4088   */
4089  public void waitUntilNoRegionsInTransition(final long timeout) throws IOException {
4090    waitFor(timeout, predicateNoRegionsInTransition());
4091  }
4092
4093  /**
4094   * Wait until no regions in transition. (time limit 15min)
4095   * @throws IOException
4096   */
4097  public void waitUntilNoRegionsInTransition() throws IOException {
4098    waitUntilNoRegionsInTransition(15 * 60000);
4099  }
4100
4101  /**
   * Wait until the given labels are available in VisibilityLabelsCache.
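   *
   * <p>Usage sketch (label names are hypothetical; labels added via VisibilityClient elsewhere):
   * <pre>
   * TEST_UTIL.waitLabelAvailable(10000, "secret", "confidential");
   * </pre>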
4103   * @param timeoutMillis
4104   * @param labels
4105   */
4106  public void waitLabelAvailable(long timeoutMillis, final String... labels) {
4107    final VisibilityLabelsCache labelsCache = VisibilityLabelsCache.get();
4108    waitFor(timeoutMillis, new Waiter.ExplainingPredicate<RuntimeException>() {
4109
4110      @Override
4111      public boolean evaluate() {
4112        for (String label : labels) {
4113          if (labelsCache.getLabelOrdinal(label) == 0) {
4114            return false;
4115          }
4116        }
4117        return true;
4118      }
4119
4120      @Override
4121      public String explainFailure() {
4122        for (String label : labels) {
4123          if (labelsCache.getLabelOrdinal(label) == 0) {
4124            return label + " is not available yet";
4125          }
4126        }
4127        return "";
4128      }
4129    });
4130  }
4131
4132  /**
   * Create a set of column descriptors with all combinations of the available compression,
   * encoding, and bloom codecs.
4135   * @return the list of column descriptors
4136   */
4137  public static List<HColumnDescriptor> generateColumnDescriptors() {
4138    return generateColumnDescriptors("");
4139  }
4140
4141  /**
   * Create a set of column descriptors with all combinations of the available compression,
   * encoding, and bloom codecs.
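   *
   * <p>Example sketch (the table name is hypothetical):
   * <pre>
   * HTableDescriptor htd = new HTableDescriptor(TableName.valueOf("allCodecs"));
   * for (HColumnDescriptor hcd : generateColumnDescriptors("f")) {
   *   htd.addFamily(hcd);
   * }
   * </pre>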
4144   * @param prefix family names prefix
4145   * @return the list of column descriptors
4146   */
4147  public static List<HColumnDescriptor> generateColumnDescriptors(final String prefix) {
4148    List<HColumnDescriptor> htds = new ArrayList<>();
4149    long familyId = 0;
4150    for (Compression.Algorithm compressionType: getSupportedCompressionAlgorithms()) {
4151      for (DataBlockEncoding encodingType: DataBlockEncoding.values()) {
4152        for (BloomType bloomType: BloomType.values()) {
4153          String name = String.format("%s-cf-!@#&-%d!@#", prefix, familyId);
4154          HColumnDescriptor htd = new HColumnDescriptor(name);
4155          htd.setCompressionType(compressionType);
4156          htd.setDataBlockEncoding(encodingType);
4157          htd.setBloomFilterType(bloomType);
4158          htds.add(htd);
4159          familyId++;
4160        }
4161      }
4162    }
4163    return htds;
4164  }

  /**
   * Get supported compression algorithms.
   * @return supported compression algorithms.
   */
  public static Compression.Algorithm[] getSupportedCompressionAlgorithms() {
    String[] allAlgos = HFile.getSupportedCompressionAlgorithms();
    List<Compression.Algorithm> supportedAlgos = new ArrayList<>();
    for (String algoName : allAlgos) {
      try {
        Compression.Algorithm algo = Compression.getCompressionAlgorithmByName(algoName);
        // Obtaining a compressor throws if the underlying codec cannot be loaded,
        // which is how unavailable algorithms are filtered out here.
        algo.getCompressor();
        supportedAlgos.add(algo);
      } catch (Throwable t) {
        // this algo is not available
      }
    }
    return supportedAlgos.toArray(new Algorithm[0]);
  }
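
  // Usage sketch (hypothetical; assumes org.junit.Assume): skip a codec-specific
  // test when the native library is absent on the build machine.
  //
  //   boolean hasLzo = Arrays.asList(getSupportedCompressionAlgorithms())
  //       .contains(Compression.Algorithm.LZO);
  //   Assume.assumeTrue("LZO codec not available", hasLzo);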

  /**
   * Return the cells of the closest row at or before {@code row} in {@code family},
   * found with a small reversed scan on the given region; null when no such row exists
   * or, for the meta region, when the found row belongs to a different table.
   */
  public Result getClosestRowBefore(Region r, byte[] row, byte[] family) throws IOException {
    Scan scan = new Scan(row);
    scan.setSmall(true);
    scan.setCaching(1);
    scan.setReversed(true);
    scan.addFamily(family);
    try (RegionScanner scanner = r.getScanner(scan)) {
      List<Cell> cells = new ArrayList<>(1);
      scanner.next(cells);
      // Guard against an empty region: cells.get(0) below would otherwise throw.
      if (cells.isEmpty()) {
        return null;
      }
      if (r.getRegionInfo().isMetaRegion() && !isTargetTable(row, cells.get(0))) {
        return null;
      }
      return Result.create(cells);
    }
  }
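
  // Usage sketch (hypothetical; "region" and FAMILY are stand-ins for a test's own
  // fixtures): reverse-scan lookup of a row boundary.
  //
  //   Result result = TEST_UTIL.getClosestRowBefore(region, Bytes.toBytes("row-050"), FAMILY);
  //   // result is "row-050" itself if it exists, else the nearest preceding row, else null.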

  private boolean isTargetTable(final byte[] inRow, Cell c) {
    // Meta row keys take the form "<tableName>,<startKey>,<regionId>"; two rows refer
    // to the same table when their prefixes up to the first delimiter match.
    String inputRowString = Bytes.toString(inRow);
    int i = inputRowString.indexOf(HConstants.DELIMITER);
    String outputRowString = Bytes.toString(c.getRowArray(), c.getRowOffset(), c.getRowLength());
    int o = outputRowString.indexOf(HConstants.DELIMITER);
    return inputRowString.substring(0, i).equals(outputRowString.substring(0, o));
  }

  /**
   * Sets up {@link MiniKdc} for testing security.
   * Uses {@link HBaseKerberosUtils} to set the given keytab file as
   * {@link HBaseKerberosUtils#KRB_KEYTAB_FILE}.
   */
  public MiniKdc setupMiniKdc(File keytabFile) throws Exception {
    Properties conf = MiniKdc.createConf();
    conf.put(MiniKdc.DEBUG, true);
    MiniKdc kdc = null;
    File dir = null;
    // There is a time lag between selecting a port and trying to bind to it. Another
    // service may grab the port in between, which results in a BindException.
    boolean bindException;
    int numTries = 0;
    do {
      try {
        bindException = false;
        dir = new File(getDataTestDir("kdc").toUri().getPath());
        kdc = new MiniKdc(conf, dir);
        kdc.start();
      } catch (BindException e) {
        FileUtils.deleteDirectory(dir);  // clean directory
        numTries++;
        if (numTries == 3) {
          LOG.error("Failed setting up MiniKDC. Tried " + numTries + " times.");
          throw e;
        }
        LOG.error("BindException encountered when setting up MiniKdc. Trying again.");
        bindException = true;
      }
    } while (bindException);
    HBaseKerberosUtils.setKeytabFileForTesting(keytabFile.getAbsolutePath());
    return kdc;
  }
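
  // Usage sketch (hypothetical test setup; principal names are examples only):
  //
  //   File keytab = new File(TEST_UTIL.getDataTestDir("keytab").toUri().getPath());
  //   MiniKdc kdc = TEST_UTIL.setupMiniKdc(keytab);
  //   kdc.createPrincipal(keytab, "hbase/localhost", "HTTP/localhost");
  //   // ... run Kerberos-dependent tests ...
  //   kdc.stop();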

  /**
   * Count the store files for {@code tableName}/{@code family} across every region
   * server in the mini cluster.
   */
  public int getNumHFiles(final TableName tableName, final byte[] family) {
    int numHFiles = 0;
    for (RegionServerThread regionServerThread : getMiniHBaseCluster().getRegionServerThreads()) {
      numHFiles += getNumHFilesForRS(regionServerThread.getRegionServer(), tableName, family);
    }
    return numHFiles;
  }
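
  // Usage sketch (hypothetical): assert that a flush created at least one new store
  // file; assumes TEST_UTIL, "tableName", and FAMILY come from the enclosing test.
  //
  //   int before = TEST_UTIL.getNumHFiles(tableName, FAMILY);
  //   TEST_UTIL.flush(tableName);
  //   assertTrue(TEST_UTIL.getNumHFiles(tableName, FAMILY) > before);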

  public int getNumHFilesForRS(final HRegionServer rs, final TableName tableName,
                               final byte[] family) {
    int numHFiles = 0;
    for (Region region : rs.getRegions(tableName)) {
      numHFiles += region.getStore(family).getStorefilesCount();
    }
    return numHFiles;
  }

  /**
   * Assert that two table descriptors are equal in everything except table name:
   * the same values map and the same column families, in order.
   */
  public void verifyTableDescriptorIgnoreTableName(TableDescriptor ltd, TableDescriptor rtd) {
    // Compare the values maps directly rather than just their hash codes, which
    // could collide and mask a real difference.
    assertEquals(ltd.getValues(), rtd.getValues());
    Collection<ColumnFamilyDescriptor> ltdFamilies = Arrays.asList(ltd.getColumnFamilies());
    Collection<ColumnFamilyDescriptor> rtdFamilies = Arrays.asList(rtd.getColumnFamilies());
    assertEquals(ltdFamilies.size(), rtdFamilies.size());
    for (Iterator<ColumnFamilyDescriptor> it = ltdFamilies.iterator(), it2 =
         rtdFamilies.iterator(); it.hasNext();) {
      assertEquals(0,
          ColumnFamilyDescriptor.COMPARATOR.compare(it.next(), it2.next()));
    }
  }
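
  // Usage sketch (hypothetical): check that a snapshot clone preserved the schema;
  // assumes "srcTable" and "cloneTable" exist in the mini cluster.
  //
  //   Admin admin = TEST_UTIL.getAdmin();
  //   verifyTableDescriptorIgnoreTableName(admin.getDescriptor(srcTable),
  //       admin.getDescriptor(cloneTable));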
}