/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase;

import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;

import edu.umd.cs.findbugs.annotations.Nullable;
import java.io.File;
import java.io.IOException;
import java.io.OutputStream;
import java.io.UncheckedIOException;
import java.lang.reflect.Field;
import java.lang.reflect.Modifier;
import java.net.BindException;
import java.net.DatagramSocket;
import java.net.InetAddress;
import java.net.ServerSocket;
import java.net.Socket;
import java.net.UnknownHostException;
import java.nio.charset.StandardCharsets;
import java.security.MessageDigest;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collection;
import java.util.Collections;
import java.util.HashSet;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.NavigableSet;
import java.util.Properties;
import java.util.Random;
import java.util.Set;
import java.util.TreeSet;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicReference;
import java.util.function.BooleanSupplier;
import java.util.stream.Collectors;
import org.apache.commons.io.FileUtils;
import org.apache.commons.lang3.RandomStringUtils;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.Waiter.ExplainingPredicate;
import org.apache.hadoop.hbase.Waiter.Predicate;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.BufferedMutator;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Consistency;
import org.apache.hadoop.hbase.client.Delete;
import org.apache.hadoop.hbase.client.Durability;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.HBaseAdmin;
import org.apache.hadoop.hbase.client.Hbck;
import org.apache.hadoop.hbase.client.ImmutableHRegionInfo;
import org.apache.hadoop.hbase.client.ImmutableHTableDescriptor;
import org.apache.hadoop.hbase.client.MasterRegistry;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.RegionInfo;
import org.apache.hadoop.hbase.client.RegionInfoBuilder;
import org.apache.hadoop.hbase.client.RegionLocator;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableState;
import org.apache.hadoop.hbase.fs.HFileSystem;
import org.apache.hadoop.hbase.io.compress.Compression;
import org.apache.hadoop.hbase.io.compress.Compression.Algorithm;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
import org.apache.hadoop.hbase.io.hfile.BlockCache;
import org.apache.hadoop.hbase.io.hfile.ChecksumUtil;
import org.apache.hadoop.hbase.io.hfile.HFile;
import org.apache.hadoop.hbase.ipc.RpcServerInterface;
import org.apache.hadoop.hbase.ipc.ServerNotRunningYetException;
import org.apache.hadoop.hbase.logging.Log4jUtils;
import org.apache.hadoop.hbase.mapreduce.MapreduceTestingShim;
import org.apache.hadoop.hbase.master.HMaster;
import org.apache.hadoop.hbase.master.RegionState;
import org.apache.hadoop.hbase.master.ServerManager;
import org.apache.hadoop.hbase.master.assignment.AssignmentManager;
import org.apache.hadoop.hbase.master.assignment.AssignmentTestingUtil;
import org.apache.hadoop.hbase.master.assignment.RegionStateStore;
import org.apache.hadoop.hbase.master.assignment.RegionStates;
import org.apache.hadoop.hbase.mob.MobFileCache;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.regionserver.ChunkCreator;
import org.apache.hadoop.hbase.regionserver.HRegion;
import org.apache.hadoop.hbase.regionserver.HRegionServer;
import org.apache.hadoop.hbase.regionserver.HStore;
import org.apache.hadoop.hbase.regionserver.InternalScanner;
import org.apache.hadoop.hbase.regionserver.MemStoreLAB;
import org.apache.hadoop.hbase.regionserver.Region;
import org.apache.hadoop.hbase.regionserver.RegionScanner;
import org.apache.hadoop.hbase.regionserver.RegionServerServices;
import org.apache.hadoop.hbase.regionserver.RegionServerStoppedException;
import org.apache.hadoop.hbase.security.HBaseKerberosUtils;
import org.apache.hadoop.hbase.security.User;
import org.apache.hadoop.hbase.security.visibility.VisibilityLabelsCache;
import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
import org.apache.hadoop.hbase.trace.TraceUtil;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.CommonFSUtils;
import org.apache.hadoop.hbase.util.FSTableDescriptors;
import org.apache.hadoop.hbase.util.FSUtils;
import org.apache.hadoop.hbase.util.JVMClusterUtil;
import org.apache.hadoop.hbase.util.JVMClusterUtil.MasterThread;
import org.apache.hadoop.hbase.util.JVMClusterUtil.RegionServerThread;
import org.apache.hadoop.hbase.util.Pair;
import org.apache.hadoop.hbase.util.RegionSplitter;
import org.apache.hadoop.hbase.util.RegionSplitter.SplitAlgorithm;
import org.apache.hadoop.hbase.util.RetryCounter;
import org.apache.hadoop.hbase.util.Threads;
import org.apache.hadoop.hbase.wal.WAL;
import org.apache.hadoop.hbase.wal.WALFactory;
import org.apache.hadoop.hbase.zookeeper.EmptyWatcher;
import org.apache.hadoop.hbase.zookeeper.ZKConfig;
import org.apache.hadoop.hbase.zookeeper.ZKWatcher;
import org.apache.hadoop.hdfs.DFSClient;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.server.namenode.EditLogFileOutputStream;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.MiniMRCluster;
import org.apache.hadoop.mapred.TaskLog;
import org.apache.hadoop.minikdc.MiniKdc;
import org.apache.hbase.thirdparty.com.google.common.io.Closeables;
import org.apache.yetus.audience.InterfaceAudience;
import org.apache.zookeeper.WatchedEvent;
import org.apache.zookeeper.ZooKeeper;
import org.apache.zookeeper.ZooKeeper.States;

/**
 * Facility for testing HBase. Replacement for
 * old HBaseTestCase and HBaseClusterTestCase functionality.
 * Create an instance and keep it around while testing HBase.  This class is
 * meant to be your one-stop shop for anything you might need while testing.  Manages
 * one cluster at a time only. The managed cluster can be an in-process
 * {@link MiniHBaseCluster}, or a deployed cluster of type {@code DistributedHBaseCluster}.
 * Not all methods work with the real cluster.
 * Depends on log4j being on the classpath and
 * hbase-site.xml for logging and test-run configuration.  It does not set
 * logging levels.
 * In the configuration properties, default values for master-info-port and
 * region-server-port are overridden such that a random port will be assigned (thus
 * avoiding port contention if another local HBase instance is already running).
 * <p>To preserve test data directories, set the system property
 * "hbase.testing.preserve.testdir" to true.
 */
@InterfaceAudience.Public
@SuppressWarnings("deprecation")
public class HBaseTestingUtility extends HBaseZKTestingUtility {

  /**
   * System property key to get test directory value. Name is as it is because mini dfs has
   * hard-codings to put test data here. It should NOT be used directly in HBase, as it's a
   * property used in mini dfs.
   * @deprecated since 2.0.0 and will be removed in 3.0.0. Can be used only with mini dfs.
   * @see <a href="https://issues.apache.org/jira/browse/HBASE-19410">HBASE-19410</a>
   */
  @Deprecated
  private static final String TEST_DIRECTORY_KEY = "test.build.data";

  public static final String REGIONS_PER_SERVER_KEY = "hbase.test.regions-per-server";
  /**
   * The default number of regions per regionserver when creating a pre-split
   * table.
   */
  public static final int DEFAULT_REGIONS_PER_SERVER = 3;

  public static final String PRESPLIT_TEST_TABLE_KEY = "hbase.test.pre-split-table";
  public static final boolean PRESPLIT_TEST_TABLE = true;

  private MiniDFSCluster dfsCluster = null;

  private volatile HBaseCluster hbaseCluster = null;
  private MiniMRCluster mrCluster = null;

  /** If there is a mini cluster running for this testing utility instance. */
  private volatile boolean miniClusterRunning;

  private String hadoopLogDir;

  /** Directory on test filesystem where we put the data for this instance of
   * HBaseTestingUtility */
  private Path dataTestDirOnTestFS = null;

  private final AtomicReference<Connection> connection = new AtomicReference<>();

  /** Filesystem URI used for map-reduce mini-cluster setup */
  private static String FS_URI;

  /** This is for unit tests parameterized on two booleans: memstore timestamps and tags. */
  public static final List<Object[]> MEMSTORETS_TAGS_PARAMETRIZED = memStoreTSAndTagsCombination();

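  /*
   * Illustrative JUnit usage of MEMSTORETS_TAGS_PARAMETRIZED (a sketch, not part of this class;
   * the test class name is made up). Each Object[] supplies two booleans: whether to write
   * memstore timestamps and whether to use tags.
   *
   *   @RunWith(Parameterized.class)
   *   public class TestWithMemstoreTSAndTags {
   *     @Parameterized.Parameters
   *     public static List<Object[]> params() {
   *       return HBaseTestingUtility.MEMSTORETS_TAGS_PARAMETRIZED;
   *     }
   *
   *     public TestWithMemstoreTSAndTags(boolean useMemstoreTS, boolean useTags) {
   *       // configure the fixture from the two flags
   *     }
   *   }
   */
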
  /**
   * Checks to see if a specific port is available.
   *
   * @param port the port number to check for availability
   * @return <tt>true</tt> if the port is available, or <tt>false</tt> if not
   */
  public static boolean available(int port) {
    ServerSocket ss = null;
    DatagramSocket ds = null;
    try {
      ss = new ServerSocket(port);
      ss.setReuseAddress(true);
      ds = new DatagramSocket(port);
      ds.setReuseAddress(true);
      return true;
    } catch (IOException e) {
      // Do nothing
    } finally {
      if (ds != null) {
        ds.close();
      }

      if (ss != null) {
        try {
          ss.close();
        } catch (IOException e) {
          /* should not be thrown */
        }
      }
    }

    return false;
  }
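
  /*
   * Illustrative use of available(int) (a sketch, not part of this class; the starting port is
   * an arbitrary assumption): probe candidate ports until a free one is found.
   *
   *   int port = 54321;
   *   while (!HBaseTestingUtility.available(port)) {
   *     port++; // try the next candidate
   *   }
   *   // 'port' was free at probe time; this is inherently racy, as another
   *   // process may still bind it before the test does.
   */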

  /**
   * Create all combinations of Bloom filters and compression algorithms for
   * testing.
   */
  private static List<Object[]> bloomAndCompressionCombinations() {
    List<Object[]> configurations = new ArrayList<>();
    for (Compression.Algorithm comprAlgo :
         HBaseCommonTestingUtility.COMPRESSION_ALGORITHMS) {
      for (BloomType bloomType : BloomType.values()) {
        configurations.add(new Object[] { comprAlgo, bloomType });
      }
    }
    return Collections.unmodifiableList(configurations);
  }

  /**
   * Create all combinations of memstoreTS and tags flags.
   */
  private static List<Object[]> memStoreTSAndTagsCombination() {
    List<Object[]> configurations = new ArrayList<>();
    configurations.add(new Object[] { false, false });
    configurations.add(new Object[] { false, true });
    configurations.add(new Object[] { true, false });
    configurations.add(new Object[] { true, true });
    return Collections.unmodifiableList(configurations);
  }

  public static List<Object[]> memStoreTSTagsAndOffheapCombination() {
    List<Object[]> configurations = new ArrayList<>();
    configurations.add(new Object[] { false, false, true });
    configurations.add(new Object[] { false, false, false });
    configurations.add(new Object[] { false, true, true });
    configurations.add(new Object[] { false, true, false });
    configurations.add(new Object[] { true, false, true });
    configurations.add(new Object[] { true, false, false });
    configurations.add(new Object[] { true, true, true });
    configurations.add(new Object[] { true, true, false });
    return Collections.unmodifiableList(configurations);
  }

  public static final Collection<Object[]> BLOOM_AND_COMPRESSION_COMBINATIONS =
      bloomAndCompressionCombinations();

  /**
   * <p>Create an HBaseTestingUtility using a default configuration.
   *
   * <p>Initially, all tmp files are written to a local test data directory.
   * Once {@link #startMiniDFSCluster} is called, either directly or via
   * {@link #startMiniCluster()}, tmp data will be written to the DFS directory instead.
   *
   * <p>Previously, there was a distinction between the type of utility returned by
   * {@link #createLocalHTU()} and this constructor; this is no longer the case. All
   * HBaseTestingUtility objects will behave as local until a DFS cluster is started,
   * at which point they will switch to using mini DFS for storage.
   */
  public HBaseTestingUtility() {
    this(HBaseConfiguration.create());
  }

  /**
   * <p>Create an HBaseTestingUtility using a given configuration.
   *
   * <p>Initially, all tmp files are written to a local test data directory.
   * Once {@link #startMiniDFSCluster} is called, either directly or via
   * {@link #startMiniCluster()}, tmp data will be written to the DFS directory instead.
   *
   * <p>Previously, there was a distinction between the type of utility returned by
   * {@link #createLocalHTU()} and this constructor; this is no longer the case. All
   * HBaseTestingUtility objects will behave as local until a DFS cluster is started,
   * at which point they will switch to using mini DFS for storage.
   *
   * @param conf The configuration to use for further operations
   */
  public HBaseTestingUtility(@Nullable Configuration conf) {
    super(conf);

    // an hbase checksum verification failure will cause unit tests to fail
    ChecksumUtil.generateExceptionForChecksumFailureForTest(true);

    // Save this for when setting default file:// breaks things
    if (this.conf.get("fs.defaultFS") != null) {
      this.conf.set("original.defaultFS", this.conf.get("fs.defaultFS"));
    }
    if (this.conf.get(HConstants.HBASE_DIR) != null) {
      this.conf.set("original.hbase.dir", this.conf.get(HConstants.HBASE_DIR));
    }
    // Every cluster is a local cluster until we start DFS
    // Note that conf could be null, but this.conf will not be
    String dataTestDir = getDataTestDir().toString();
    this.conf.set("fs.defaultFS", "file:///");
    this.conf.set(HConstants.HBASE_DIR, "file://" + dataTestDir);
    LOG.debug("Setting {} to {}", HConstants.HBASE_DIR, dataTestDir);
    this.conf.setBoolean(CommonFSUtils.UNSAFE_STREAM_CAPABILITY_ENFORCE, false);
    // If the value for random ports isn't set, set it to true, so tests get
    // random port assignment by default unless they explicitly opt out
    this.conf.setBoolean(LocalHBaseCluster.ASSIGN_RANDOM_PORTS,
        this.conf.getBoolean(LocalHBaseCluster.ASSIGN_RANDOM_PORTS, true));
  }
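
  /*
   * Illustrative construction (a sketch; the retry count shown is an arbitrary choice): tune a
   * configuration before handing it to the utility.
   *
   *   Configuration conf = HBaseConfiguration.create();
   *   conf.setInt("hbase.client.retries.number", 3);
   *   HBaseTestingUtility util = new HBaseTestingUtility(conf);
   */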

  /**
   * @deprecated since 2.0.0 and will be removed in 3.0.0. Use {@link #HBaseTestingUtility()}
   *   instead.
   * @return a normal HBaseTestingUtility
   * @see #HBaseTestingUtility()
   * @see <a href="https://issues.apache.org/jira/browse/HBASE-19841">HBASE-19841</a>
   */
  @Deprecated
  public static HBaseTestingUtility createLocalHTU() {
    return new HBaseTestingUtility();
  }

  /**
   * @deprecated since 2.0.0 and will be removed in 3.0.0. Use
   *   {@link #HBaseTestingUtility(Configuration)} instead.
   * @return a normal HBaseTestingUtility
   * @see #HBaseTestingUtility(Configuration)
   * @see <a href="https://issues.apache.org/jira/browse/HBASE-19841">HBASE-19841</a>
   */
  @Deprecated
  public static HBaseTestingUtility createLocalHTU(Configuration c) {
    return new HBaseTestingUtility(c);
  }

  /**
   * Close both the region {@code r} and its underlying WAL. For use in tests.
   */
  public static void closeRegionAndWAL(final Region r) throws IOException {
    closeRegionAndWAL((HRegion) r);
  }

  /**
   * Close both the HRegion {@code r} and its underlying WAL. For use in tests.
   */
  public static void closeRegionAndWAL(final HRegion r) throws IOException {
    if (r == null) return;
    r.close();
    if (r.getWAL() == null) return;
    r.getWAL().close();
  }

  /**
   * Returns this class's instance of {@link Configuration}.  Be careful how
   * you use the returned Configuration since {@link Connection} instances
   * can be shared.  The Map of Connections is keyed by the Configuration.  If
   * say, a Connection was being used against a cluster that had been shutdown,
   * see {@link #shutdownMiniCluster()}, then the Connection will no longer
   * be wholesome.  Rather than using the returned Configuration directly, it is
   * usually best to make a copy and use that.  Do
   * <code>Configuration c = new Configuration(INSTANCE.getConfiguration());</code>
   * @return Instance of Configuration.
   */
  @Override
  public Configuration getConfiguration() {
    return super.getConfiguration();
  }
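
  /*
   * As the javadoc above advises, copy the returned Configuration rather than sharing it
   * (a sketch; TEST_UTIL denotes an HBaseTestingUtility instance and "my.test.flag" is a
   * hypothetical key used only for illustration):
   *
   *   Configuration c = new Configuration(TEST_UTIL.getConfiguration());
   *   c.setBoolean("my.test.flag", true);
   *   // use 'c' for one-off clients without disturbing shared state
   */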

  public void setHBaseCluster(HBaseCluster hbaseCluster) {
    this.hbaseCluster = hbaseCluster;
  }

  /**
   * Home our data in a dir under {@link #DEFAULT_BASE_TEST_DIRECTORY}.
   * Give it a random name so we can have many concurrent tests running if
   * we need to.  It needs to amend the {@link #TEST_DIRECTORY_KEY}
   * System property, as that is what minidfscluster bases
   * its data dir on.  Modifying a System property is not the way to do concurrent
   * instances -- another instance could grab the temporary
   * value unintentionally -- but nothing can be done about it at the moment;
   * single instance only is how the minidfscluster works.
   *
   * We also create the underlying directory names for
   *  hadoop.log.dir, mapreduce.cluster.local.dir and hadoop.tmp.dir, and set the values
   *  in the conf, and as a system property for hadoop.tmp.dir (We do not create them!).
   *
   * @return The calculated data test build directory, if newly-created.
   */
  @Override
  protected Path setupDataTestDir() {
    Path testPath = super.setupDataTestDir();
    if (null == testPath) {
      return null;
    }

    createSubDirAndSystemProperty(
      "hadoop.log.dir",
      testPath, "hadoop-log-dir");

    // This is defaulted in core-default.xml to /tmp/hadoop-${user.name}, but
    //  we want our own value to ensure uniqueness on the same machine
    createSubDirAndSystemProperty(
      "hadoop.tmp.dir",
      testPath, "hadoop-tmp-dir");

    // Read and modified in org.apache.hadoop.mapred.MiniMRCluster
    createSubDir(
      "mapreduce.cluster.local.dir",
      testPath, "mapred-local-dir");
    return testPath;
  }

  private void createSubDirAndSystemProperty(
    String propertyName, Path parent, String subDirName) {

    String sysValue = System.getProperty(propertyName);

    if (sysValue != null) {
      // There is already a value set. So we do nothing but hope
      //  that there will be no conflicts
      LOG.info("System.getProperty(\"" + propertyName + "\") already set to: " +
        sysValue + " so I do NOT create it in " + parent);
      String confValue = conf.get(propertyName);
      if (confValue != null && !confValue.endsWith(sysValue)) {
        LOG.warn(
          propertyName + " property value differs in configuration and system: " +
          "Configuration=" + confValue + " while System=" + sysValue +
          " Overriding the configuration value with the system value."
        );
      }
      conf.set(propertyName, sysValue);
    } else {
      // Ok, it's not set, so we create it as a subdirectory
      createSubDir(propertyName, parent, subDirName);
      System.setProperty(propertyName, conf.get(propertyName));
    }
  }

  /**
   * @return Where to write test data on the test filesystem; Returns working directory
   * for the test filesystem by default
   * @see #setupDataTestDirOnTestFS()
   * @see #getTestFileSystem()
   */
  private Path getBaseTestDirOnTestFS() throws IOException {
    FileSystem fs = getTestFileSystem();
    return new Path(fs.getWorkingDirectory(), "test-data");
  }

  /**
   * @return META table descriptor
   * @deprecated since 2.0 version and will be removed in 3.0 version. Currently for test only.
   *             Use {@link #getMetaTableDescriptorBuilder()} instead.
   */
  @Deprecated
  public HTableDescriptor getMetaTableDescriptor() {
    return new ImmutableHTableDescriptor(getMetaTableDescriptorBuilder().build());
  }

  /**
   * @return META table descriptor builder
   * @deprecated Since 2.3.0. No one should be using this internally. Used in testing only.
   */
  @Deprecated
  @InterfaceAudience.Private
  public TableDescriptorBuilder getMetaTableDescriptorBuilder() {
    try {
      return FSTableDescriptors.createMetaTableDescriptorBuilder(conf);
    } catch (IOException e) {
      throw new RuntimeException("Unable to create META table descriptor", e);
    }
  }

  /**
   * Returns a Path in the test filesystem, obtained from {@link #getTestFileSystem()},
   * to write temporary test data. Call this method after setting up the mini dfs cluster
   * if the test relies on it.
   * @return a unique path in the test filesystem
   */
  public Path getDataTestDirOnTestFS() throws IOException {
    if (dataTestDirOnTestFS == null) {
      setupDataTestDirOnTestFS();
    }

    return dataTestDirOnTestFS;
  }

  /**
   * Returns a Path in the test filesystem, obtained from {@link #getTestFileSystem()},
   * to write temporary test data. Call this method after setting up the mini dfs cluster
   * if the test relies on it.
   * @return a unique path in the test filesystem
   * @param subdirName name of the subdir to create under the base test dir
   */
  public Path getDataTestDirOnTestFS(final String subdirName) throws IOException {
    return new Path(getDataTestDirOnTestFS(), subdirName);
  }

  /**
   * Sets up a path in the test filesystem to be used by tests.
   * Creates a new directory if not already set up.
   */
  private void setupDataTestDirOnTestFS() throws IOException {
    if (dataTestDirOnTestFS != null) {
      LOG.warn("Data test dir on test fs already set up in "
          + dataTestDirOnTestFS.toString());
      return;
    }
    dataTestDirOnTestFS = getNewDataTestDirOnTestFS();
  }

  /**
   * Sets up a new path in the test filesystem to be used by tests.
   */
  private Path getNewDataTestDirOnTestFS() throws IOException {
    // The file system can be either local, mini dfs, or if the configuration
    // is supplied externally, it can be an external cluster FS. If it is a local
    // file system, the tests should use getBaseTestDir, otherwise, we can use
    // the working directory, and create a unique sub dir there
    FileSystem fs = getTestFileSystem();
    Path newDataTestDir;
    String randomStr = getRandomUUID().toString();
    if (fs.getUri().getScheme().equals(FileSystem.getLocal(conf).getUri().getScheme())) {
      newDataTestDir = new Path(getDataTestDir(), randomStr);
      File dataTestDir = new File(newDataTestDir.toString());
      if (deleteOnExit()) {
        dataTestDir.deleteOnExit();
      }
    } else {
      Path base = getBaseTestDirOnTestFS();
      newDataTestDir = new Path(base, randomStr);
      if (deleteOnExit()) {
        fs.deleteOnExit(newDataTestDir);
      }
    }
    return newDataTestDir;
  }

  /**
   * Cleans the test data directory on the test filesystem.
   * @return True if we removed the test dirs
   */
  public boolean cleanupDataTestDirOnTestFS() throws IOException {
    if (dataTestDirOnTestFS == null) {
      // Nothing was set up on the test filesystem, so there is nothing to remove.
      return false;
    }
    boolean ret = getTestFileSystem().delete(dataTestDirOnTestFS, true);
    if (ret) {
      dataTestDirOnTestFS = null;
    }
    return ret;
  }

  /**
   * Cleans a subdirectory under the test data directory on the test filesystem.
   * @return True if we removed the child
   */
  public boolean cleanupDataTestDirOnTestFS(String subdirName) throws IOException {
    Path cpath = getDataTestDirOnTestFS(subdirName);
    return getTestFileSystem().delete(cpath, true);
  }
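
  /*
   * Illustrative use of the per-test directory helpers (a sketch; TEST_UTIL denotes an
   * HBaseTestingUtility instance and the subdir name is an arbitrary choice): write throwaway
   * data under a unique path on the test filesystem, then clean it up.
   *
   *   Path out = TEST_UTIL.getDataTestDirOnTestFS("my-output");
   *   FileSystem fs = TEST_UTIL.getTestFileSystem();
   *   try (FSDataOutputStream os = fs.create(new Path(out, "part-0"))) {
   *     os.writeUTF("hello");
   *   }
   *   TEST_UTIL.cleanupDataTestDirOnTestFS("my-output");
   */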

  /**
   * Start a minidfscluster.
   * @param servers How many DNs to start.
   * @see #shutdownMiniDFSCluster()
   * @return The mini dfs cluster created.
   */
  public MiniDFSCluster startMiniDFSCluster(int servers) throws Exception {
    return startMiniDFSCluster(servers, null);
  }

  /**
   * Start a minidfscluster.
   * This is useful if you want to run datanodes on distinct hosts for things
   * like HDFS block location verification.
   * If you start MiniDFSCluster without host names, all instances of the
   * datanodes will have the same host name.
   * @param hosts hostnames of the DNs to run on.
   * @see #shutdownMiniDFSCluster()
   * @return The mini dfs cluster created.
   */
  public MiniDFSCluster startMiniDFSCluster(final String[] hosts)
  throws Exception {
    if (hosts != null && hosts.length != 0) {
      return startMiniDFSCluster(hosts.length, hosts);
    } else {
      return startMiniDFSCluster(1, null);
    }
  }

  /**
   * Start a minidfscluster.
   * Can only create one.
   * @param servers How many DNs to start.
   * @param hosts hostnames of the DNs to run on.
   * @see #shutdownMiniDFSCluster()
   * @return The mini dfs cluster created.
   */
  public MiniDFSCluster startMiniDFSCluster(int servers, final String[] hosts)
  throws Exception {
    return startMiniDFSCluster(servers, null, hosts);
  }

  private void setFs() throws IOException {
    if (this.dfsCluster == null) {
      LOG.info("Skipping setting fs because dfsCluster is null");
      return;
    }
    FileSystem fs = this.dfsCluster.getFileSystem();
    CommonFSUtils.setFsDefault(this.conf, new Path(fs.getUri()));

    // re-enable this check with dfs
    conf.unset(CommonFSUtils.UNSAFE_STREAM_CAPABILITY_ENFORCE);
  }

  public MiniDFSCluster startMiniDFSCluster(int servers, final String[] racks, String[] hosts)
      throws Exception {
    createDirsAndSetProperties();
    EditLogFileOutputStream.setShouldSkipFsyncForTesting(true);

    // Error level to skip some warnings specific to the minicluster. See HBASE-4709
    Log4jUtils.setLogLevel(org.apache.hadoop.metrics2.util.MBeans.class.getName(), "ERROR");
    Log4jUtils.setLogLevel(org.apache.hadoop.metrics2.impl.MetricsSystemImpl.class.getName(),
      "ERROR");

    TraceUtil.initTracer(conf);

    this.dfsCluster = new MiniDFSCluster(0, this.conf, servers, true, true,
        true, null, racks, hosts, null);

    // Set this just-started cluster as our filesystem.
    setFs();

    // Wait for the cluster to be totally up
    this.dfsCluster.waitClusterUp();

    // reset the test directory for the test file system
    dataTestDirOnTestFS = null;
    String dataTestDir = getDataTestDir().toString();
    conf.set(HConstants.HBASE_DIR, dataTestDir);
    LOG.debug("Setting {} to {}", HConstants.HBASE_DIR, dataTestDir);

    return this.dfsCluster;
  }

  public MiniDFSCluster startMiniDFSClusterForTestWAL(int namenodePort) throws IOException {
    createDirsAndSetProperties();
    // Error level to skip some warnings specific to the minicluster. See HBASE-4709
    Log4jUtils.setLogLevel(org.apache.hadoop.metrics2.util.MBeans.class.getName(), "ERROR");
    Log4jUtils.setLogLevel(org.apache.hadoop.metrics2.impl.MetricsSystemImpl.class.getName(),
      "ERROR");
    dfsCluster = new MiniDFSCluster(namenodePort, conf, 5, false, true, true, null,
        null, null, null);
    return dfsCluster;
  }

  /**
   * This is used before starting HDFS and map-reduce mini-clusters. Run something like the below
   * to check for the likes of '/tmp' references -- i.e. references outside of the test data dir
   * -- in the conf.
   *
   * <pre>
   * Configuration conf = TEST_UTIL.getConfiguration();
   * for (Iterator&lt;Map.Entry&lt;String, String&gt;&gt; i = conf.iterator(); i.hasNext();) {
   *   Map.Entry&lt;String, String&gt; e = i.next();
   *   assertFalse(e.getKey() + " " + e.getValue(), e.getValue().contains("/tmp"));
   * }
   * </pre>
   */
  private void createDirsAndSetProperties() throws IOException {
    setupClusterTestDir();
    conf.set(TEST_DIRECTORY_KEY, clusterTestDir.getPath());
    System.setProperty(TEST_DIRECTORY_KEY, clusterTestDir.getPath());
    createDirAndSetProperty("test.cache.data");
    createDirAndSetProperty("hadoop.tmp.dir");
    hadoopLogDir = createDirAndSetProperty("hadoop.log.dir");
    createDirAndSetProperty("mapreduce.cluster.local.dir");
    createDirAndSetProperty("mapreduce.cluster.temp.dir");
    enableShortCircuit();

    Path root = getDataTestDirOnTestFS("hadoop");
    conf.set(MapreduceTestingShim.getMROutputDirProp(),
      new Path(root, "mapred-output-dir").toString());
    conf.set("mapreduce.jobtracker.system.dir", new Path(root, "mapred-system-dir").toString());
    conf.set("mapreduce.jobtracker.staging.root.dir",
      new Path(root, "mapreduce-jobtracker-staging-root-dir").toString());
    conf.set("mapreduce.job.working.dir", new Path(root, "mapred-working-dir").toString());
    conf.set("yarn.app.mapreduce.am.staging-dir",
      new Path(root, "mapreduce-am-staging-root-dir").toString());

    // Frustrate yarn's and hdfs's attempts at writing /tmp.
    // Below is fragile. Make it so we just interpolate any 'tmp' reference.
    createDirAndSetProperty("yarn.node-labels.fs-store.root-dir");
    createDirAndSetProperty("yarn.node-attribute.fs-store.root-dir");
    createDirAndSetProperty("yarn.nodemanager.log-dirs");
    createDirAndSetProperty("yarn.nodemanager.remote-app-log-dir");
    createDirAndSetProperty("yarn.timeline-service.entity-group-fs-store.active-dir");
    createDirAndSetProperty("yarn.timeline-service.entity-group-fs-store.done-dir");
    createDirAndSetProperty("dfs.journalnode.edits.dir");
    createDirAndSetProperty("dfs.datanode.shared.file.descriptor.paths");
    createDirAndSetProperty("nfs.dump.dir");
    createDirAndSetProperty("java.io.tmpdir");
    createDirAndSetProperty("dfs.provided.aliasmap.inmemory.leveldb.dir");
    createDirAndSetProperty("fs.s3a.committer.staging.tmp.path");
  }

  /**
   *  Check whether the tests should assume NEW_VERSION_BEHAVIOR when creating
   *  new column families. Defaults to false.
   */
  public boolean isNewVersionBehaviorEnabled() {
    final String propName = "hbase.tests.new.version.behavior";
    String v = System.getProperty(propName);
    if (v != null) {
      return Boolean.parseBoolean(v);
    }
    return false;
  }

  /**
   *  Get the HBase setting for dfs.client.read.shortcircuit from the conf or a system property.
   *  This allows specifying the parameter on the command line.
   *  If not set, the default is false.
   */
  public boolean isReadShortCircuitOn() {
    final String propName = "hbase.tests.use.shortcircuit.reads";
    String readOnProp = System.getProperty(propName);
    if (readOnProp != null) {
      return Boolean.parseBoolean(readOnProp);
    } else {
      return conf.getBoolean(propName, false);
    }
  }

  /** Enable short circuit reads, unless configured differently.
   * Sets both HBase and HDFS settings, including skipping the hdfs checksum checks.
   */
  private void enableShortCircuit() {
    if (isReadShortCircuitOn()) {
      String curUser = System.getProperty("user.name");
      LOG.info("read short circuit is ON for user " + curUser);
      // read short circuit, for hdfs
      conf.set("dfs.block.local-path-access.user", curUser);
      // read short circuit, for hbase
      conf.setBoolean("dfs.client.read.shortcircuit", true);
      // Skip checking checksum, for the hdfs client and the datanode
      conf.setBoolean("dfs.client.read.shortcircuit.skip.checksum", true);
    } else {
      LOG.info("read short circuit is OFF");
    }
  }
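
  /*
   * To exercise short-circuit reads in a test run, set the controlling system property before
   * the mini DFS cluster starts (a sketch; the property is the one read by
   * isReadShortCircuitOn() above):
   *
   *   System.setProperty("hbase.tests.use.shortcircuit.reads", "true");
   *   // subsequent startMiniDFSCluster(...) calls then wire up
   *   // dfs.client.read.shortcircuit via enableShortCircuit()
   */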

  private String createDirAndSetProperty(String property) {
    return createDirAndSetProperty(property, property);
  }

  private String createDirAndSetProperty(final String relPath, String property) {
    String path = getDataTestDir(relPath).toString();
    System.setProperty(property, path);
    conf.set(property, path);
    new File(path).mkdirs();
    LOG.info("Setting " + property + " to " + path + " in system properties and HBase conf");
    return path;
  }

  /**
   * Shuts down the instance created by a call to {@link #startMiniDFSCluster(int)},
   * or does nothing if there is none.
   */
  public void shutdownMiniDFSCluster() throws IOException {
    if (this.dfsCluster != null) {
      // The below throws an exception per dn, AsynchronousCloseException.
      this.dfsCluster.shutdown();
      dfsCluster = null;
      dataTestDirOnTestFS = null;
      CommonFSUtils.setFsDefault(this.conf, new Path("file:///"));
    }
  }
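
  /*
   * Typical pairing of the DFS helpers in a test lifecycle (a sketch; TEST_UTIL denotes an
   * HBaseTestingUtility instance and the datanode count is an arbitrary choice):
   *
   *   MiniDFSCluster dfs = TEST_UTIL.startMiniDFSCluster(3);
   *   try {
   *     FileSystem fs = dfs.getFileSystem();
   *     // ... exercise code against the mini DFS ...
   *   } finally {
   *     TEST_UTIL.shutdownMiniDFSCluster();
   *   }
   */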

  /**
   * Start up a minicluster of hbase, dfs, and zookeeper where the WAL's walDir is created
   * separately.
   * All other options will use default values, defined in {@link StartMiniClusterOption.Builder}.
   * @param createWALDir Whether to create a new WAL directory.
   * @return The mini HBase cluster created.
   * @see #shutdownMiniCluster()
   * @deprecated since 2.2.0 and will be removed in 4.0.0. Use
   *   {@link #startMiniCluster(StartMiniClusterOption)} instead.
   * @see #startMiniCluster(StartMiniClusterOption)
   * @see <a href="https://issues.apache.org/jira/browse/HBASE-21071">HBASE-21071</a>
   */
  @Deprecated
  public MiniHBaseCluster startMiniCluster(boolean createWALDir) throws Exception {
    StartMiniClusterOption option = StartMiniClusterOption.builder()
        .createWALDir(createWALDir).build();
    return startMiniCluster(option);
  }

  /**
   * Start up a minicluster of hbase, dfs, and zookeeper.
   * All other options will use default values, defined in {@link StartMiniClusterOption.Builder}.
   * @param numSlaves Slave node number, for both HBase region server and HDFS data node.
   * @param createRootDir Whether to create a new root or data directory path.
   * @return The mini HBase cluster created.
   * @see #shutdownMiniCluster()
   * @deprecated since 2.2.0 and will be removed in 4.0.0. Use
   *   {@link #startMiniCluster(StartMiniClusterOption)} instead.
   * @see #startMiniCluster(StartMiniClusterOption)
   * @see <a href="https://issues.apache.org/jira/browse/HBASE-21071">HBASE-21071</a>
   */
  @Deprecated
  public MiniHBaseCluster startMiniCluster(int numSlaves, boolean createRootDir)
  throws Exception {
    StartMiniClusterOption option = StartMiniClusterOption.builder()
        .numRegionServers(numSlaves).numDataNodes(numSlaves).createRootDir(createRootDir).build();
    return startMiniCluster(option);
  }

  /**
   * Start up a minicluster of hbase, dfs, and zookeeper.
   * All other options will use default values, defined in {@link StartMiniClusterOption.Builder}.
   * @param numSlaves Slave node number, for both HBase region server and HDFS data node.
   * @param createRootDir Whether to create a new root or data directory path.
   * @param createWALDir Whether to create a new WAL directory.
   * @return The mini HBase cluster created.
   * @see #shutdownMiniCluster()
   * @deprecated since 2.2.0 and will be removed in 4.0.0. Use
   *   {@link #startMiniCluster(StartMiniClusterOption)} instead.
   * @see #startMiniCluster(StartMiniClusterOption)
   * @see <a href="https://issues.apache.org/jira/browse/HBASE-21071">HBASE-21071</a>
   */
  @Deprecated
  public MiniHBaseCluster startMiniCluster(int numSlaves, boolean createRootDir,
      boolean createWALDir) throws Exception {
    StartMiniClusterOption option = StartMiniClusterOption.builder()
        .numRegionServers(numSlaves).numDataNodes(numSlaves).createRootDir(createRootDir)
        .createWALDir(createWALDir).build();
    return startMiniCluster(option);
  }

  /**
   * Start up a minicluster of hbase, dfs, and zookeeper.
   * All other options will use default values, defined in {@link StartMiniClusterOption.Builder}.
   * @param numMasters Master node number.
   * @param numSlaves Slave node number, for both HBase region server and HDFS data node.
   * @param createRootDir Whether to create a new root or data directory path.
   * @return The mini HBase cluster created.
   * @see #shutdownMiniCluster()
   * @deprecated since 2.2.0 and will be removed in 4.0.0. Use
   *   {@link #startMiniCluster(StartMiniClusterOption)} instead.
   * @see #startMiniCluster(StartMiniClusterOption)
   * @see <a href="https://issues.apache.org/jira/browse/HBASE-21071">HBASE-21071</a>
   */
  @Deprecated
  public MiniHBaseCluster startMiniCluster(int numMasters, int numSlaves, boolean createRootDir)
    throws Exception {
    StartMiniClusterOption option = StartMiniClusterOption.builder()
        .numMasters(numMasters).numRegionServers(numSlaves).createRootDir(createRootDir)
        .numDataNodes(numSlaves).build();
    return startMiniCluster(option);
  }

  /**
   * Start up a minicluster of hbase, dfs, and zookeeper.
   * All other options will use default values, defined in {@link StartMiniClusterOption.Builder}.
   * @param numMasters Master node number.
   * @param numSlaves Slave node number, for both HBase region server and HDFS data node.
   * @return The mini HBase cluster created.
   * @see #shutdownMiniCluster()
   * @deprecated since 2.2.0 and will be removed in 4.0.0. Use
   *   {@link #startMiniCluster(StartMiniClusterOption)} instead.
   * @see #startMiniCluster(StartMiniClusterOption)
   * @see <a href="https://issues.apache.org/jira/browse/HBASE-21071">HBASE-21071</a>
   */
  @Deprecated
  public MiniHBaseCluster startMiniCluster(int numMasters, int numSlaves) throws Exception {
    StartMiniClusterOption option = StartMiniClusterOption.builder()
        .numMasters(numMasters).numRegionServers(numSlaves).numDataNodes(numSlaves).build();
    return startMiniCluster(option);
  }

  /**
   * Start up a minicluster of hbase, dfs, and zookeeper.
   * All other options will use default values, defined in {@link StartMiniClusterOption.Builder}.
   * @param numMasters Master node number.
   * @param numSlaves Slave node number, for both HBase region server and HDFS data node.
   * @param dataNodeHosts The hostnames of DataNodes to run on. If not null, its size will
   *                      override the HDFS DataNode count.
   * @param createRootDir Whether to create a new root or data directory path.
   * @return The mini HBase cluster created.
   * @see #shutdownMiniCluster()
   * @deprecated since 2.2.0 and will be removed in 4.0.0. Use
   *   {@link #startMiniCluster(StartMiniClusterOption)} instead.
   * @see #startMiniCluster(StartMiniClusterOption)
   * @see <a href="https://issues.apache.org/jira/browse/HBASE-21071">HBASE-21071</a>
   */
  @Deprecated
  public MiniHBaseCluster startMiniCluster(int numMasters, int numSlaves, String[] dataNodeHosts,
      boolean createRootDir) throws Exception {
    StartMiniClusterOption option = StartMiniClusterOption.builder()
        .numMasters(numMasters).numRegionServers(numSlaves).createRootDir(createRootDir)
        .numDataNodes(numSlaves).dataNodeHosts(dataNodeHosts).build();
    return startMiniCluster(option);
  }

  /**
   * Start up a minicluster of hbase, dfs, and zookeeper.
   * All other options will use default values, defined in {@link StartMiniClusterOption.Builder}.
   * @param numMasters Master node number.
   * @param numSlaves Slave node number, for both HBase region server and HDFS data node.
   * @param dataNodeHosts The hostnames of DataNodes to run on. If not null, its size will
   *                      override the HDFS DataNode count.
   * @return The mini HBase cluster created.
   * @see #shutdownMiniCluster()
   * @deprecated since 2.2.0 and will be removed in 4.0.0. Use
   *   {@link #startMiniCluster(StartMiniClusterOption)} instead.
   * @see #startMiniCluster(StartMiniClusterOption)
   * @see <a href="https://issues.apache.org/jira/browse/HBASE-21071">HBASE-21071</a>
   */
  @Deprecated
  public MiniHBaseCluster startMiniCluster(int numMasters, int numSlaves, String[] dataNodeHosts)
      throws Exception {
    StartMiniClusterOption option = StartMiniClusterOption.builder()
        .numMasters(numMasters).numRegionServers(numSlaves)
        .numDataNodes(numSlaves).dataNodeHosts(dataNodeHosts).build();
    return startMiniCluster(option);
  }

  /**
   * Start up a minicluster of hbase, dfs, and zookeeper.
   * All other options will use default values, defined in {@link StartMiniClusterOption.Builder}.
   * @param numMasters Master node number.
   * @param numRegionServers Number of region servers.
   * @param numDataNodes Number of datanodes.
   * @return The mini HBase cluster created.
   * @see #shutdownMiniCluster()
   * @deprecated since 2.2.0 and will be removed in 4.0.0. Use
   *   {@link #startMiniCluster(StartMiniClusterOption)} instead.
   * @see #startMiniCluster(StartMiniClusterOption)
   * @see <a href="https://issues.apache.org/jira/browse/HBASE-21071">HBASE-21071</a>
   */
  @Deprecated
  public MiniHBaseCluster startMiniCluster(int numMasters, int numRegionServers, int numDataNodes)
      throws Exception {
    StartMiniClusterOption option = StartMiniClusterOption.builder()
        .numMasters(numMasters).numRegionServers(numRegionServers).numDataNodes(numDataNodes)
        .build();
    return startMiniCluster(option);
  }

  /**
   * Start up a minicluster of hbase, dfs, and zookeeper.
   * All other options will use default values, defined in {@link StartMiniClusterOption.Builder}.
   * @param numMasters Master node number.
   * @param numSlaves Slave node number, for both HBase region server and HDFS data node.
   * @param dataNodeHosts The hostnames of DataNodes to run on. If not null, its size will
   *                      override the HDFS DataNode count.
   * @param masterClass The class to use as HMaster, or null for default.
   * @param rsClass The class to use as HRegionServer, or null for default.
   * @return The mini HBase cluster created.
   * @see #shutdownMiniCluster()
   * @deprecated since 2.2.0 and will be removed in 4.0.0. Use
   *   {@link #startMiniCluster(StartMiniClusterOption)} instead.
   * @see #startMiniCluster(StartMiniClusterOption)
   * @see <a href="https://issues.apache.org/jira/browse/HBASE-21071">HBASE-21071</a>
   */
  @Deprecated
  public MiniHBaseCluster startMiniCluster(int numMasters, int numSlaves, String[] dataNodeHosts,
      Class<? extends HMaster> masterClass,
      Class<? extends MiniHBaseCluster.MiniHBaseClusterRegionServer> rsClass)
      throws Exception {
    StartMiniClusterOption option = StartMiniClusterOption.builder()
        .numMasters(numMasters).masterClass(masterClass)
        .numRegionServers(numSlaves).rsClass(rsClass)
        .numDataNodes(numSlaves).dataNodeHosts(dataNodeHosts)
        .build();
    return startMiniCluster(option);
  }

  /**
   * Start up a minicluster of hbase, dfs, and zookeeper.
   * All other options will use default values, defined in {@link StartMiniClusterOption.Builder}.
   * @param numMasters Master node number.
   * @param numRegionServers Number of region servers.
   * @param numDataNodes Number of datanodes.
   * @param dataNodeHosts The hostnames of DataNodes to run on. If not null, its size will
   *                      override the HDFS DataNode count.
   * @param masterClass The class to use as HMaster, or null for default.
   * @param rsClass The class to use as HRegionServer, or null for default.
   * @return The mini HBase cluster created.
   * @see #shutdownMiniCluster()
   * @deprecated since 2.2.0 and will be removed in 4.0.0. Use
   *   {@link #startMiniCluster(StartMiniClusterOption)} instead.
   * @see #startMiniCluster(StartMiniClusterOption)
   * @see <a href="https://issues.apache.org/jira/browse/HBASE-21071">HBASE-21071</a>
   */
  @Deprecated
  public MiniHBaseCluster startMiniCluster(int numMasters, int numRegionServers, int numDataNodes,
      String[] dataNodeHosts, Class<? extends HMaster> masterClass,
      Class<? extends MiniHBaseCluster.MiniHBaseClusterRegionServer> rsClass)
    throws Exception {
    StartMiniClusterOption option = StartMiniClusterOption.builder()
        .numMasters(numMasters).masterClass(masterClass)
        .numRegionServers(numRegionServers).rsClass(rsClass)
        .numDataNodes(numDataNodes).dataNodeHosts(dataNodeHosts)
        .build();
    return startMiniCluster(option);
  }

  /**
   * Start up a minicluster of hbase, dfs, and zookeeper.
   * All other options will use default values, defined in {@link StartMiniClusterOption.Builder}.
   * @param numMasters Master node number.
   * @param numRegionServers Number of region servers.
   * @param numDataNodes Number of datanodes.
   * @param dataNodeHosts The hostnames of DataNodes to run on. If not null, its size will
   *                      override the HDFS DataNode count.
   * @param masterClass The class to use as HMaster, or null for default.
   * @param rsClass The class to use as HRegionServer, or null for default.
   * @param createRootDir Whether to create a new root or data directory path.
   * @param createWALDir Whether to create a new WAL directory.
   * @return The mini HBase cluster created.
   * @see #shutdownMiniCluster()
   * @deprecated since 2.2.0 and will be removed in 4.0.0. Use
   *   {@link #startMiniCluster(StartMiniClusterOption)} instead.
   * @see #startMiniCluster(StartMiniClusterOption)
   * @see <a href="https://issues.apache.org/jira/browse/HBASE-21071">HBASE-21071</a>
   */
  @Deprecated
  public MiniHBaseCluster startMiniCluster(int numMasters, int numRegionServers, int numDataNodes,
      String[] dataNodeHosts, Class<? extends HMaster> masterClass,
      Class<? extends MiniHBaseCluster.MiniHBaseClusterRegionServer> rsClass, boolean createRootDir,
      boolean createWALDir) throws Exception {
    StartMiniClusterOption option = StartMiniClusterOption.builder()
        .numMasters(numMasters).masterClass(masterClass)
        .numRegionServers(numRegionServers).rsClass(rsClass)
        .numDataNodes(numDataNodes).dataNodeHosts(dataNodeHosts)
        .createRootDir(createRootDir).createWALDir(createWALDir)
        .build();
    return startMiniCluster(option);
  }

  /**
   * Start up a minicluster of hbase, dfs and zookeeper clusters with a given slave node number.
   * All other options will use default values, defined in {@link StartMiniClusterOption.Builder}.
   * @param numSlaves slave node number, for both HBase region server and HDFS data node.
   * @see #startMiniCluster(StartMiniClusterOption)
   * @see #shutdownMiniCluster()
   */
  public MiniHBaseCluster startMiniCluster(int numSlaves) throws Exception {
    StartMiniClusterOption option = StartMiniClusterOption.builder()
        .numRegionServers(numSlaves).numDataNodes(numSlaves).build();
    return startMiniCluster(option);
  }

  /**
   * Start up a minicluster of hbase, dfs and zookeeper all using default options.
   * Option default values can be found in {@link StartMiniClusterOption.Builder}.
   * @see #startMiniCluster(StartMiniClusterOption)
   * @see #shutdownMiniCluster()
   */
  public MiniHBaseCluster startMiniCluster() throws Exception {
    return startMiniCluster(StartMiniClusterOption.builder().build());
  }

  /**
   * Start up a mini cluster of hbase, optionally dfs and zookeeper if needed.
   * It modifies Configuration.  It homes the cluster data directory under a random
   * subdirectory in a directory under the System property test.build.data, to be cleaned
   * up on exit.
   * @see #shutdownMiniCluster()
   */
  public MiniHBaseCluster startMiniCluster(StartMiniClusterOption option) throws Exception {
    LOG.info("Starting up minicluster with option: {}", option);

    // If we already put up a cluster, fail.
    if (miniClusterRunning) {
      throw new IllegalStateException("A mini-cluster is already running");
    }
    miniClusterRunning = true;

    setupClusterTestDir();
    System.setProperty(TEST_DIRECTORY_KEY, this.clusterTestDir.getPath());

    // Bring up mini dfs cluster. This spews a bunch of warnings about missing
    // scheme. Complaints are 'Scheme is undefined for build/test/data/dfs/name1'.
    if (dfsCluster == null) {
      LOG.info("STARTING DFS");
      dfsCluster = startMiniDFSCluster(option.getNumDataNodes(), option.getDataNodeHosts());
    } else {
      LOG.info("NOT STARTING DFS");
    }

    // Start up a zk cluster.
    if (getZkCluster() == null) {
      startMiniZKCluster(option.getNumZkServers());
    }

    // Start the MiniHBaseCluster
    return startMiniHBaseCluster(option);
  }
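
  /*
   * Preferred, non-deprecated startup path (a sketch; TEST_UTIL denotes an HBaseTestingUtility
   * instance and the node counts are arbitrary choices): describe the cluster shape through
   * StartMiniClusterOption instead of the legacy numeric overloads above.
   *
   *   StartMiniClusterOption option = StartMiniClusterOption.builder()
   *       .numMasters(1)
   *       .numRegionServers(3)
   *       .numDataNodes(3)
   *       .build();
   *   MiniHBaseCluster cluster = TEST_UTIL.startMiniCluster(option);
   *   // ... run the test ...
   *   TEST_UTIL.shutdownMiniCluster();
   */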

  /**
   * Starts up the mini hbase cluster. Usually you won't want this. You'll usually want
   * {@link #startMiniCluster()}. This is useful when doing stepped startup of clusters.
   * @return Reference to the hbase mini hbase cluster.
   * @see #startMiniCluster(StartMiniClusterOption)
   * @see #shutdownMiniHBaseCluster()
   */
  public MiniHBaseCluster startMiniHBaseCluster(StartMiniClusterOption option)
    throws IOException, InterruptedException {
    // Now do the mini hbase cluster. Set the hbase.rootdir in config.
    createRootDir(option.isCreateRootDir());
    if (option.isCreateWALDir()) {
      createWALRootDir();
    }
    // Set the hbase.fs.tmp.dir config to make sure that we have some default value. This is
    // for tests that do not read hbase-defaults.xml
    setHBaseFsTmpDir();

    // These settings will make the master wait until this exact number of
    // region servers are connected.
    if (conf.getInt(ServerManager.WAIT_ON_REGIONSERVERS_MINTOSTART, -1) == -1) {
      conf.setInt(ServerManager.WAIT_ON_REGIONSERVERS_MINTOSTART, option.getNumRegionServers());
    }
    if (conf.getInt(ServerManager.WAIT_ON_REGIONSERVERS_MAXTOSTART, -1) == -1) {
      conf.setInt(ServerManager.WAIT_ON_REGIONSERVERS_MAXTOSTART, option.getNumRegionServers());
    }

    // Avoid log flooded with chore execution time, see HBASE-24646 for more details.
    Log4jUtils.setLogLevel(org.apache.hadoop.hbase.ScheduledChore.class.getName(), "INFO");

    Configuration c = new Configuration(this.conf);
    TraceUtil.initTracer(c);
    this.hbaseCluster = new MiniHBaseCluster(c, option.getNumMasters(),
      option.getNumAlwaysStandByMasters(), option.getNumRegionServers(), option.getRsPorts(),
      option.getMasterClass(), option.getRsClass());
    // Populate the master address configuration from mini cluster configuration.
    conf.set(HConstants.MASTER_ADDRS_KEY, MasterRegistry.getMasterAddr(c));
    // Don't leave here till we've done a successful scan of the hbase:meta
    try (Table t = getConnection().getTable(TableName.META_TABLE_NAME);
      ResultScanner s = t.getScanner(new Scan())) {
      for (;;) {
        if (s.next() == null) {
          break;
        }
      }
    }

    getAdmin(); // create the hbaseAdmin immediately
    LOG.info("Minicluster is up; activeMaster={}", getHBaseCluster().getMaster());

    return (MiniHBaseCluster) hbaseCluster;
  }
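
  /*
   * Stepped startup, as the javadoc above suggests (a sketch; TEST_UTIL denotes an
   * HBaseTestingUtility instance): bring up DFS and ZK yourself, then layer HBase on top.
   *
   *   TEST_UTIL.startMiniDFSCluster(1);
   *   TEST_UTIL.startMiniZKCluster();
   *   MiniHBaseCluster cluster =
   *       TEST_UTIL.startMiniHBaseCluster(StartMiniClusterOption.builder().build());
   */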
1196
1197  /**
1198   * Starts up mini hbase cluster using default options.
1199   * Default options can be found in {@link StartMiniClusterOption.Builder}.
1200   * @see #startMiniHBaseCluster(StartMiniClusterOption)
1201   * @see #shutdownMiniHBaseCluster()
1202   */
1203  public MiniHBaseCluster startMiniHBaseCluster() throws IOException, InterruptedException {
1204    return startMiniHBaseCluster(StartMiniClusterOption.builder().build());
1205  }
1206
1207  /**
1208   * Starts up mini hbase cluster.
1209   * Usually you won't want this.  You'll usually want {@link #startMiniCluster()}.
1210   * All other options will use default values, defined in {@link StartMiniClusterOption.Builder}.
1211   * @param numMasters Master node number.
1212   * @param numRegionServers Number of region servers.
1213   * @return The mini HBase cluster created.
1214   * @see #shutdownMiniHBaseCluster()
1215   * @deprecated since 2.2.0 and will be removed in 4.0.0. Use
1216   *   {@link #startMiniHBaseCluster(StartMiniClusterOption)} instead.
1217   * @see #startMiniHBaseCluster(StartMiniClusterOption)
1218   * @see <a href="https://issues.apache.org/jira/browse/HBASE-21071">HBASE-21071</a>
1219   */
1220  @Deprecated
1221  public MiniHBaseCluster startMiniHBaseCluster(int numMasters, int numRegionServers)
1222      throws IOException, InterruptedException {
1223    StartMiniClusterOption option = StartMiniClusterOption.builder()
1224        .numMasters(numMasters).numRegionServers(numRegionServers).build();
1225    return startMiniHBaseCluster(option);
1226  }
1227
1228  /**
1229   * Starts up mini hbase cluster.
1230   * Usually you won't want this.  You'll usually want {@link #startMiniCluster()}.
1231   * All other options will use default values, defined in {@link StartMiniClusterOption.Builder}.
1232   * @param numMasters Master node number.
1233   * @param numRegionServers Number of region servers.
1234   * @param rsPorts Ports that RegionServer should use.
1235   * @return The mini HBase cluster created.
1236   * @see #shutdownMiniHBaseCluster()
1237   * @deprecated since 2.2.0 and will be removed in 4.0.0. Use
1238   *   {@link #startMiniHBaseCluster(StartMiniClusterOption)} instead.
1239   * @see #startMiniHBaseCluster(StartMiniClusterOption)
1240   * @see <a href="https://issues.apache.org/jira/browse/HBASE-21071">HBASE-21071</a>
1241   */
1242  @Deprecated
1243  public MiniHBaseCluster startMiniHBaseCluster(int numMasters, int numRegionServers,
1244      List<Integer> rsPorts) throws IOException, InterruptedException {
1245    StartMiniClusterOption option = StartMiniClusterOption.builder()
1246        .numMasters(numMasters).numRegionServers(numRegionServers).rsPorts(rsPorts).build();
1247    return startMiniHBaseCluster(option);
1248  }
1249
1250  /**
1251   * Starts up mini hbase cluster.
1252   * Usually you won't want this.  You'll usually want {@link #startMiniCluster()}.
1253   * All other options will use default values, defined in {@link StartMiniClusterOption.Builder}.
   * @param numMasters Number of master nodes.
1255   * @param numRegionServers Number of region servers.
1256   * @param rsPorts Ports that RegionServer should use.
1257   * @param masterClass The class to use as HMaster, or null for default.
1258   * @param rsClass The class to use as HRegionServer, or null for default.
1259   * @param createRootDir Whether to create a new root or data directory path.
1260   * @param createWALDir Whether to create a new WAL directory.
1261   * @return The mini HBase cluster created.
1262   * @see #shutdownMiniHBaseCluster()
1263   * @deprecated since 2.2.0 and will be removed in 4.0.0. Use
1264   *   {@link #startMiniHBaseCluster(StartMiniClusterOption)} instead.
1265   * @see #startMiniHBaseCluster(StartMiniClusterOption)
1266   * @see <a href="https://issues.apache.org/jira/browse/HBASE-21071">HBASE-21071</a>
1267   */
1268  @Deprecated
1269  public MiniHBaseCluster startMiniHBaseCluster(int numMasters, int numRegionServers,
1270      List<Integer> rsPorts, Class<? extends HMaster> masterClass,
1271      Class<? extends MiniHBaseCluster.MiniHBaseClusterRegionServer> rsClass,
1272      boolean createRootDir, boolean createWALDir) throws IOException, InterruptedException {
1273    StartMiniClusterOption option = StartMiniClusterOption.builder()
1274        .numMasters(numMasters).masterClass(masterClass)
1275        .numRegionServers(numRegionServers).rsClass(rsClass).rsPorts(rsPorts)
1276        .createRootDir(createRootDir).createWALDir(createWALDir).build();
1277    return startMiniHBaseCluster(option);
1278  }
1279
1280  /**
1281   * Starts the hbase cluster up again after shutting it down previously in a
1282   * test.  Use this if you want to keep dfs/zk up and just stop/start hbase.
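   * <p>For example (sketch), given a {@code HBaseTestingUtility util} whose dfs/zk are
   * already up:
   * <pre>{@code
   * util.shutdownMiniHBaseCluster();
   * util.restartHBaseCluster(2); // bring HBase back with two region servers
   * }</pre>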
1283   * @param servers number of region servers
1284   */
1285  public void restartHBaseCluster(int servers) throws IOException, InterruptedException {
1286    this.restartHBaseCluster(servers, null);
1287  }
1288
1289  public void restartHBaseCluster(int servers, List<Integer> ports)
1290      throws IOException, InterruptedException {
1291    StartMiniClusterOption option =
1292        StartMiniClusterOption.builder().numRegionServers(servers).rsPorts(ports).build();
1293    restartHBaseCluster(option);
1294    invalidateConnection();
1295  }
1296
1297  public void restartHBaseCluster(StartMiniClusterOption option)
1298      throws IOException, InterruptedException {
1299    closeConnection();
1300    this.hbaseCluster =
1301        new MiniHBaseCluster(this.conf, option.getNumMasters(), option.getNumAlwaysStandByMasters(),
1302            option.getNumRegionServers(), option.getRsPorts(), option.getMasterClass(),
1303            option.getRsClass());
    // Don't leave here till we've done a successful scan of the hbase:meta
    try (Connection conn = ConnectionFactory.createConnection(this.conf);
      Table t = conn.getTable(TableName.META_TABLE_NAME);
      ResultScanner s = t.getScanner(new Scan())) {
      while (s.next() != null) {
        // do nothing
      }
    }
    LOG.info("HBase has been restarted");
1315  }
1316
1317  /**
1318   * @return Current mini hbase cluster. Only has something in it after a call
1319   * to {@link #startMiniCluster()}.
1320   * @see #startMiniCluster()
1321   */
1322  public MiniHBaseCluster getMiniHBaseCluster() {
1323    if (this.hbaseCluster == null || this.hbaseCluster instanceof MiniHBaseCluster) {
1324      return (MiniHBaseCluster)this.hbaseCluster;
1325    }
1326    throw new RuntimeException(hbaseCluster + " not an instance of " +
1327                               MiniHBaseCluster.class.getName());
1328  }
1329
1330  /**
1331   * Stops mini hbase, zk, and hdfs clusters.
1332   * @see #startMiniCluster(int)
1333   */
1334  public void shutdownMiniCluster() throws IOException {
1335    LOG.info("Shutting down minicluster");
1336    shutdownMiniHBaseCluster();
1337    shutdownMiniDFSCluster();
1338    shutdownMiniZKCluster();
1339
1340    cleanupTestDir();
1341    miniClusterRunning = false;
1342    LOG.info("Minicluster is down");
1343  }
1344
1345  /**
   * Shuts down the HBase mini cluster. Does not shut down zk or dfs if running.
   * @throws java.io.IOException in case the command is unsuccessful
1348   */
1349  public void shutdownMiniHBaseCluster() throws IOException {
1350    cleanup();
1351    if (this.hbaseCluster != null) {
1352      this.hbaseCluster.shutdown();
1353      // Wait till hbase is down before going on to shutdown zk.
1354      this.hbaseCluster.waitUntilShutDown();
1355      this.hbaseCluster = null;
1356    }
1357    if (zooKeeperWatcher != null) {
1358      zooKeeperWatcher.close();
1359      zooKeeperWatcher = null;
1360    }
1361  }
1362
1363  /**
   * Abruptly shuts down the HBase mini cluster. Does not shut down zk or dfs if running.
   * @throws java.io.IOException in case the command is unsuccessful
1366   */
1367  public void killMiniHBaseCluster() throws IOException {
1368    cleanup();
1369    if (this.hbaseCluster != null) {
1370      getMiniHBaseCluster().killAll();
1371      this.hbaseCluster = null;
1372    }
1373    if (zooKeeperWatcher != null) {
1374      zooKeeperWatcher.close();
1375      zooKeeperWatcher = null;
1376    }
1377  }
1378
  // Close the hbase admin, close the current connection, and reset the MIN/MAX RS-to-start configs.
1380  private void cleanup() throws IOException {
1381    closeConnection();
1382    // unset the configuration for MIN and MAX RS to start
1383    conf.setInt(ServerManager.WAIT_ON_REGIONSERVERS_MINTOSTART, -1);
1384    conf.setInt(ServerManager.WAIT_ON_REGIONSERVERS_MAXTOSTART, -1);
1385  }
1386
1387  /**
1388   * Returns the path to the default root dir the minicluster uses. If <code>create</code>
   * is true, a new root directory path is fetched irrespective of whether it has been fetched
   * before. If false, the previous path is used.
1391   * Note: this does not cause the root dir to be created.
1392   * @return Fully qualified path for the default hbase root dir
1393   * @throws IOException
1394   */
1395  public Path getDefaultRootDirPath(boolean create) throws IOException {
1396    if (!create) {
1397      return getDataTestDirOnTestFS();
1398    } else {
1399      return getNewDataTestDirOnTestFS();
1400    }
1401  }
1402
1403  /**
   * Same as {@link HBaseTestingUtility#getDefaultRootDirPath(boolean create)}
   * except that the <code>create</code> flag is false.
1406   * Note: this does not cause the root dir to be created.
1407   * @return Fully qualified path for the default hbase root dir
1408   * @throws IOException
1409   */
1410  public Path getDefaultRootDirPath() throws IOException {
1411    return getDefaultRootDirPath(false);
1412  }
1413
1414  /**
1415   * Creates an hbase rootdir in user home directory.  Also creates hbase
1416   * version file.  Normally you won't make use of this method.  Root hbasedir
1417   * is created for you as part of mini cluster startup.  You'd only use this
1418   * method if you were doing manual operation.
   * @param create If true, fetch a new root/data directory path even if one has been
   * fetched before; if false, reuse the previously fetched path.
   * Note: the directory is created either way; if it already exists, it will be
   * overwritten.
1423   * @return Fully qualified path to hbase root dir
1424   * @throws IOException
1425   */
1426  public Path createRootDir(boolean create) throws IOException {
1427    FileSystem fs = FileSystem.get(this.conf);
1428    Path hbaseRootdir = getDefaultRootDirPath(create);
1429    CommonFSUtils.setRootDir(this.conf, hbaseRootdir);
1430    fs.mkdirs(hbaseRootdir);
1431    FSUtils.setVersion(fs, hbaseRootdir);
1432    return hbaseRootdir;
1433  }
1434
1435  /**
1436   * Same as {@link HBaseTestingUtility#createRootDir(boolean create)}
   * except that the <code>create</code> flag is false.
1438   * @return Fully qualified path to hbase root dir
1439   * @throws IOException
1440   */
1441  public Path createRootDir() throws IOException {
1442    return createRootDir(false);
1443  }
1444
1445  /**
   * Creates an hbase WAL directory in the user's home directory.
   * Normally you won't make use of this method. The root hbaseWALDir
   * is created for you as part of mini cluster startup. You'd only use this
   * method if you were doing manual operations.
   *
   * @return Fully qualified path to the hbase WAL root dir
   * @throws IOException
   */
1454  public Path createWALRootDir() throws IOException {
1455    FileSystem fs = FileSystem.get(this.conf);
1456    Path walDir = getNewDataTestDirOnTestFS();
1457    CommonFSUtils.setWALRootDir(this.conf, walDir);
1458    fs.mkdirs(walDir);
1459    return walDir;
1460  }
1461
1462  private void setHBaseFsTmpDir() throws IOException {
1463    String hbaseFsTmpDirInString = this.conf.get("hbase.fs.tmp.dir");
1464    if (hbaseFsTmpDirInString == null) {
1465      this.conf.set("hbase.fs.tmp.dir",  getDataTestDirOnTestFS("hbase-staging").toString());
1466      LOG.info("Setting hbase.fs.tmp.dir to " + this.conf.get("hbase.fs.tmp.dir"));
1467    } else {
1468      LOG.info("The hbase.fs.tmp.dir is set to " + hbaseFsTmpDirInString);
1469    }
1470  }
1471
1472  /**
1473   * Flushes all caches in the mini hbase cluster
1474   * @throws IOException
1475   */
1476  public void flush() throws IOException {
1477    getMiniHBaseCluster().flushcache();
1478  }
1479
1480  /**
   * Flushes all caches for the given table in the mini hbase cluster
1482   * @throws IOException
1483   */
1484  public void flush(TableName tableName) throws IOException {
1485    getMiniHBaseCluster().flushcache(tableName);
1486  }
1487
1488  /**
1489   * Compact all regions in the mini hbase cluster
1490   * @throws IOException
1491   */
1492  public void compact(boolean major) throws IOException {
1493    getMiniHBaseCluster().compact(major);
1494  }
1495
1496  /**
   * Compact all of a table's regions in the mini hbase cluster
1498   * @throws IOException
1499   */
1500  public void compact(TableName tableName, boolean major) throws IOException {
1501    getMiniHBaseCluster().compact(tableName, major);
1502  }
1503
1504  /**
1505   * Create a table.
1506   * @param tableName
1507   * @param family
1508   * @return A Table instance for the created table.
1509   * @throws IOException
1510   */
1511  public Table createTable(TableName tableName, String family)
1512  throws IOException{
1513    return createTable(tableName, new String[]{family});
1514  }
1515
1516  /**
1517   * Create a table.
1518   * @param tableName
1519   * @param families
1520   * @return A Table instance for the created table.
1521   * @throws IOException
1522   */
1523  public Table createTable(TableName tableName, String[] families)
1524  throws IOException {
1525    List<byte[]> fams = new ArrayList<>(families.length);
1526    for (String family : families) {
1527      fams.add(Bytes.toBytes(family));
1528    }
1529    return createTable(tableName, fams.toArray(new byte[0][]));
1530  }
1531
1532  /**
1533   * Create a table.
1534   * @param tableName
1535   * @param family
1536   * @return A Table instance for the created table.
1537   * @throws IOException
1538   */
1539  public Table createTable(TableName tableName, byte[] family)
1540  throws IOException{
1541    return createTable(tableName, new byte[][]{family});
1542  }
1543
1544  /**
1545   * Create a table with multiple regions.
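   * <p>The table is pre-split evenly between {@code aaaaa} and {@code zzzzz}, which is why
   * at least three regions are required. Sketch (table and family names are illustrative):
   * <pre>{@code
   * Table t = createMultiRegionTable(TableName.valueOf("testtable"), Bytes.toBytes("cf"), 8);
   * }</pre>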
1546   * @param tableName
1547   * @param family
1548   * @param numRegions
1549   * @return A Table instance for the created table.
1550   * @throws IOException
1551   */
1552  public Table createMultiRegionTable(TableName tableName, byte[] family, int numRegions)
1553      throws IOException {
1554    if (numRegions < 3) throw new IOException("Must create at least 3 regions");
1555    byte[] startKey = Bytes.toBytes("aaaaa");
1556    byte[] endKey = Bytes.toBytes("zzzzz");
1557    byte[][] splitKeys = Bytes.split(startKey, endKey, numRegions - 3);
1558
1559    return createTable(tableName, new byte[][] { family }, splitKeys);
1560  }
1561
1562  /**
1563   * Create a table.
1564   * @param tableName
1565   * @param families
1566   * @return A Table instance for the created table.
1567   * @throws IOException
1568   */
1569  public Table createTable(TableName tableName, byte[][] families)
1570  throws IOException {
1571    return createTable(tableName, families, (byte[][]) null);
1572  }
1573
1574  /**
1575   * Create a table with multiple regions.
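   * <p>Splits on {@link #KEYS_FOR_HBA_CREATE_TABLE} ({@code bbb} through {@code zzz}),
   * yielding 26 regions. Sketch (table and family names are illustrative):
   * <pre>{@code
   * Table t = createMultiRegionTable(TableName.valueOf("testtable"),
   *   new byte[][] { Bytes.toBytes("cf") });
   * }</pre>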
1576   * @param tableName
1577   * @param families
1578   * @return A Table instance for the created table.
1579   * @throws IOException
1580   */
1581  public Table createMultiRegionTable(TableName tableName, byte[][] families) throws IOException {
1582    return createTable(tableName, families, KEYS_FOR_HBA_CREATE_TABLE);
1583  }
1584
1585  /**
1586   * Create a table with multiple regions.
1587   * @param tableName
1588   * @param replicaCount replica count.
1589   * @param families
1590   * @return A Table instance for the created table.
1591   * @throws IOException
1592   */
1593  public Table createMultiRegionTable(TableName tableName, int replicaCount, byte[][] families)
1594    throws IOException {
1595    return createTable(tableName, families, KEYS_FOR_HBA_CREATE_TABLE, replicaCount);
1596  }
1597
1598  /**
1599   * Create a table.
1600   * @param tableName
1601   * @param families
1602   * @param splitKeys
1603   * @return A Table instance for the created table.
1604   * @throws IOException
1605   */
1606  public Table createTable(TableName tableName, byte[][] families, byte[][] splitKeys)
1607      throws IOException {
1608    return createTable(tableName, families, splitKeys, 1, new Configuration(getConfiguration()));
1609  }
1610
1611  /**
1612   * Create a table.
1613   * @param tableName the table name
1614   * @param families the families
1615   * @param splitKeys the splitkeys
1616   * @param replicaCount the region replica count
1617   * @return A Table instance for the created table.
1618   * @throws IOException throws IOException
1619   */
1620  public Table createTable(TableName tableName, byte[][] families, byte[][] splitKeys,
1621      int replicaCount) throws IOException {
1622    return createTable(tableName, families, splitKeys, replicaCount,
1623      new Configuration(getConfiguration()));
1624  }
1625
1626  public Table createTable(TableName tableName, byte[][] families,
1627      int numVersions, byte[] startKey, byte[] endKey, int numRegions)
1628  throws IOException{
1629    HTableDescriptor desc = createTableDescriptor(tableName, families, numVersions);
1630
1631    getAdmin().createTable(desc, startKey, endKey, numRegions);
    // HBaseAdmin only waits for regions to appear in hbase:meta; we
    // should wait until they are assigned
1634    waitUntilAllRegionsAssigned(tableName);
1635    return getConnection().getTable(tableName);
1636  }
1637
1638  /**
1639   * Create a table.
1640   * @param htd
1641   * @param families
1642   * @param c Configuration to use
1643   * @return A Table instance for the created table.
1644   * @throws IOException
1645   */
1646  public Table createTable(TableDescriptor htd, byte[][] families, Configuration c)
1647  throws IOException {
1648    return createTable(htd, families, null, c);
1649  }
1650
1651  /**
1652   * Create a table.
1653   * @param htd table descriptor
1654   * @param families array of column families
1655   * @param splitKeys array of split keys
1656   * @param c Configuration to use
1657   * @return A Table instance for the created table.
1658   * @throws IOException if getAdmin or createTable fails
1659   */
1660  public Table createTable(TableDescriptor htd, byte[][] families, byte[][] splitKeys,
1661      Configuration c) throws IOException {
    // Disable blooms (they are on by default as of 0.95) because tests have hard-coded
    // counts of what to expect in the block cache, etc., and blooms being on interferes
    // with those counts.
1665    return createTable(htd, families, splitKeys, BloomType.NONE, HConstants.DEFAULT_BLOCKSIZE, c);
1666  }
1667
1668  /**
1669   * Create a table.
1670   * @param htd table descriptor
1671   * @param families array of column families
1672   * @param splitKeys array of split keys
1673   * @param type Bloom type
1674   * @param blockSize block size
1675   * @param c Configuration to use
1676   * @return A Table instance for the created table.
1677   * @throws IOException if getAdmin or createTable fails
1678   */
1680  public Table createTable(TableDescriptor htd, byte[][] families, byte[][] splitKeys,
1681      BloomType type, int blockSize, Configuration c) throws IOException {
1682    TableDescriptorBuilder builder = TableDescriptorBuilder.newBuilder(htd);
1683    for (byte[] family : families) {
1684      ColumnFamilyDescriptorBuilder cfdb = ColumnFamilyDescriptorBuilder.newBuilder(family)
1685        .setBloomFilterType(type)
1686        .setBlocksize(blockSize);
1687      if (isNewVersionBehaviorEnabled()) {
        cfdb.setNewVersionBehavior(true);
1689      }
1690      builder.setColumnFamily(cfdb.build());
1691    }
1692    TableDescriptor td = builder.build();
1693    getAdmin().createTable(td, splitKeys);
    // HBaseAdmin only waits for regions to appear in hbase:meta;
    // we should wait until they are assigned
1696    waitUntilAllRegionsAssigned(td.getTableName());
1697    return getConnection().getTable(td.getTableName());
1698  }
1699
1700  /**
1701   * Create a table.
1702   * @param htd table descriptor
1703   * @param splitRows array of split keys
1704   * @return A Table instance for the created table.
1705   * @throws IOException
1706   */
1707  public Table createTable(TableDescriptor htd, byte[][] splitRows)
1708      throws IOException {
1709    TableDescriptorBuilder builder = TableDescriptorBuilder.newBuilder(htd);
1710    if (isNewVersionBehaviorEnabled()) {
1711      for (ColumnFamilyDescriptor family : htd.getColumnFamilies()) {
1712         builder.setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(family)
1713           .setNewVersionBehavior(true).build());
1714      }
1715    }
1716    getAdmin().createTable(builder.build(), splitRows);
    // HBaseAdmin only waits for regions to appear in hbase:meta;
    // we should wait until they are assigned
1719    waitUntilAllRegionsAssigned(htd.getTableName());
1720    return getConnection().getTable(htd.getTableName());
1721  }
1722
1723  /**
1724   * Create a table.
1725   * @param tableName the table name
1726   * @param families the families
1727   * @param splitKeys the split keys
1728   * @param replicaCount the replica count
1729   * @param c Configuration to use
1730   * @return A Table instance for the created table.
1731   * @throws IOException
1732   */
1733  public Table createTable(TableName tableName, byte[][] families, byte[][] splitKeys,
1734      int replicaCount, final Configuration c) throws IOException {
1735    HTableDescriptor htd = new HTableDescriptor(tableName);
1736    htd.setRegionReplication(replicaCount);
1737    return createTable(htd, families, splitKeys, c);
1738  }
1739
1740  /**
1741   * Create a table.
1742   * @param tableName
1743   * @param family
1744   * @param numVersions
1745   * @return A Table instance for the created table.
1746   * @throws IOException
1747   */
1748  public Table createTable(TableName tableName, byte[] family, int numVersions)
1749  throws IOException {
1750    return createTable(tableName, new byte[][]{family}, numVersions);
1751  }
1752
1753  /**
1754   * Create a table.
1755   * @param tableName
1756   * @param families
1757   * @param numVersions
1758   * @return A Table instance for the created table.
1759   * @throws IOException
1760   */
1761  public Table createTable(TableName tableName, byte[][] families, int numVersions)
1762      throws IOException {
1763    return createTable(tableName, families, numVersions, (byte[][]) null);
1764  }
1765
1766  /**
1767   * Create a table.
1768   * @param tableName
1769   * @param families
1770   * @param numVersions
1771   * @param splitKeys
1772   * @return A Table instance for the created table.
1773   * @throws IOException
1774   */
1775  public Table createTable(TableName tableName, byte[][] families, int numVersions,
1776      byte[][] splitKeys) throws IOException {
1777    HTableDescriptor desc = new HTableDescriptor(tableName);
1778    for (byte[] family : families) {
1779      HColumnDescriptor hcd = new HColumnDescriptor(family).setMaxVersions(numVersions);
1780      if (isNewVersionBehaviorEnabled()) {
1781        hcd.setNewVersionBehavior(true);
1782      }
1783      desc.addFamily(hcd);
1784    }
1785    getAdmin().createTable(desc, splitKeys);
    // HBaseAdmin only waits for regions to appear in hbase:meta; we should wait until they are
    // assigned
1788    waitUntilAllRegionsAssigned(tableName);
1789    return getConnection().getTable(tableName);
1790  }
1791
1792  /**
1793   * Create a table with multiple regions.
1794   * @param tableName
1795   * @param families
1796   * @param numVersions
1797   * @return A Table instance for the created table.
1798   * @throws IOException
1799   */
1800  public Table createMultiRegionTable(TableName tableName, byte[][] families, int numVersions)
1801      throws IOException {
1802    return createTable(tableName, families, numVersions, KEYS_FOR_HBA_CREATE_TABLE);
1803  }
1804
1805  /**
1806   * Create a table.
1807   * @param tableName
1808   * @param families
1809   * @param numVersions
1810   * @param blockSize
1811   * @return A Table instance for the created table.
1812   * @throws IOException
1813   */
1814  public Table createTable(TableName tableName, byte[][] families,
1815    int numVersions, int blockSize) throws IOException {
1816    HTableDescriptor desc = new HTableDescriptor(tableName);
1817    for (byte[] family : families) {
1818      HColumnDescriptor hcd = new HColumnDescriptor(family)
1819          .setMaxVersions(numVersions)
1820          .setBlocksize(blockSize);
1821      if (isNewVersionBehaviorEnabled()) {
1822        hcd.setNewVersionBehavior(true);
1823      }
1824      desc.addFamily(hcd);
1825    }
1826    getAdmin().createTable(desc);
    // HBaseAdmin only waits for regions to appear in hbase:meta; we should wait until they are
    // assigned
1829    waitUntilAllRegionsAssigned(tableName);
1830    return getConnection().getTable(tableName);
1831  }
1832
1833  public Table createTable(TableName tableName, byte[][] families,
1834      int numVersions, int blockSize, String cpName) throws IOException {
    HTableDescriptor desc = new HTableDescriptor(tableName);
    for (byte[] family : families) {
      HColumnDescriptor hcd = new HColumnDescriptor(family)
          .setMaxVersions(numVersions)
          .setBlocksize(blockSize);
      if (isNewVersionBehaviorEnabled()) {
        hcd.setNewVersionBehavior(true);
      }
      desc.addFamily(hcd);
    }
    if (cpName != null) {
      desc.addCoprocessor(cpName);
    }
    getAdmin().createTable(desc);
    // HBaseAdmin only waits for regions to appear in hbase:meta; we should wait until they are
    // assigned
    waitUntilAllRegionsAssigned(tableName);
    return getConnection().getTable(tableName);
  }
1854
1855  /**
1856   * Create a table.
1857   * @param tableName
1858   * @param families
1859   * @param numVersions
1860   * @return A Table instance for the created table.
1861   * @throws IOException
1862   */
1863  public Table createTable(TableName tableName, byte[][] families,
1864      int[] numVersions)
1865  throws IOException {
1866    HTableDescriptor desc = new HTableDescriptor(tableName);
1867    int i = 0;
1868    for (byte[] family : families) {
1869      HColumnDescriptor hcd = new HColumnDescriptor(family)
1870          .setMaxVersions(numVersions[i]);
1871      if (isNewVersionBehaviorEnabled()) {
1872        hcd.setNewVersionBehavior(true);
1873      }
1874      desc.addFamily(hcd);
1875      i++;
1876    }
1877    getAdmin().createTable(desc);
    // HBaseAdmin only waits for regions to appear in hbase:meta; we should wait until they are
    // assigned
1880    waitUntilAllRegionsAssigned(tableName);
1881    return getConnection().getTable(tableName);
1882  }
1883
1884  /**
1885   * Create a table.
1886   * @param tableName
1887   * @param family
1888   * @param splitRows
1889   * @return A Table instance for the created table.
1890   * @throws IOException
1891   */
1892  public Table createTable(TableName tableName, byte[] family, byte[][] splitRows)
1893      throws IOException {
1894    HTableDescriptor desc = new HTableDescriptor(tableName);
1895    HColumnDescriptor hcd = new HColumnDescriptor(family);
1896    if (isNewVersionBehaviorEnabled()) {
1897      hcd.setNewVersionBehavior(true);
1898    }
1899    desc.addFamily(hcd);
1900    getAdmin().createTable(desc, splitRows);
    // HBaseAdmin only waits for regions to appear in hbase:meta; we should wait until they are
    // assigned
1903    waitUntilAllRegionsAssigned(tableName);
1904    return getConnection().getTable(tableName);
1905  }
1906
1907  /**
1908   * Create a table with multiple regions.
1909   * @param tableName
1910   * @param family
1911   * @return A Table instance for the created table.
1912   * @throws IOException
1913   */
1914  public Table createMultiRegionTable(TableName tableName, byte[] family) throws IOException {
1915    return createTable(tableName, family, KEYS_FOR_HBA_CREATE_TABLE);
1916  }
1917
1918  /**
1919   * Modify a table, synchronous. Waiting logic similar to that of {@code admin.rb#alter_status}.
1920   */
1921  @SuppressWarnings("serial")
1922  public static void modifyTableSync(Admin admin, TableDescriptor desc)
1923      throws IOException, InterruptedException {
1924    admin.modifyTable(desc);
1925    Pair<Integer, Integer> status = new Pair<Integer, Integer>() {{
1926      setFirst(0);
1927      setSecond(0);
1928    }};
1929    int i = 0;
1930    do {
1931      status = admin.getAlterStatus(desc.getTableName());
1932      if (status.getSecond() != 0) {
1933        LOG.debug(status.getSecond() - status.getFirst() + "/" + status.getSecond()
1934          + " regions updated.");
1935        Thread.sleep(1 * 1000L);
1936      } else {
1937        LOG.debug("All regions updated.");
1938        break;
1939      }
1940    } while (status.getFirst() != 0 && i++ < 500);
1941    if (status.getFirst() != 0) {
1942      throw new IOException("Failed to update all regions even after 500 seconds.");
1943    }
1944  }
1945
1946  /**
1947   * Set the number of Region replicas.
1948   */
1949  public static void setReplicas(Admin admin, TableName table, int replicaCount)
1950    throws IOException, InterruptedException {
1951    TableDescriptor desc = TableDescriptorBuilder.newBuilder(admin.getDescriptor(table))
1952      .setRegionReplication(replicaCount).build();
1953    admin.modifyTable(desc);
1954  }
1955
1956  /**
1957   * Drop an existing table
1958   * @param tableName existing table
1959   */
1960  public void deleteTable(TableName tableName) throws IOException {
1961    try {
1962      getAdmin().disableTable(tableName);
1963    } catch (TableNotEnabledException e) {
1964      LOG.debug("Table: " + tableName + " already disabled, so just deleting it.");
1965    }
1966    getAdmin().deleteTable(tableName);
1967  }
1968
1969  /**
   * Drop the given table if it exists; a missing table is ignored
1971   * @param tableName existing table
1972   */
1973  public void deleteTableIfAny(TableName tableName) throws IOException {
1974    try {
1975      deleteTable(tableName);
1976    } catch (TableNotFoundException e) {
1977      // ignore
1978    }
1979  }
1980
1981  // ==========================================================================
1982  // Canned table and table descriptor creation
1983  // TODO replace HBaseTestCase
1984
1985  public final static byte [] fam1 = Bytes.toBytes("colfamily11");
1986  public final static byte [] fam2 = Bytes.toBytes("colfamily21");
1987  public final static byte [] fam3 = Bytes.toBytes("colfamily31");
1988  public static final byte[][] COLUMNS = {fam1, fam2, fam3};
1989  private static final int MAXVERSIONS = 3;
1990
1991  public static final char FIRST_CHAR = 'a';
1992  public static final char LAST_CHAR = 'z';
1993  public static final byte [] START_KEY_BYTES = {FIRST_CHAR, FIRST_CHAR, FIRST_CHAR};
1994  public static final String START_KEY = new String(START_KEY_BYTES, HConstants.UTF8_CHARSET);
1995
1996  /**
1997   * @deprecated since 2.0.0 and will be removed in 3.0.0. Use
1998   *   {@link #createTableDescriptor(TableName, int, int, int, KeepDeletedCells)} instead.
1999   * @see #createTableDescriptor(TableName, int, int, int, KeepDeletedCells)
2000   * @see <a href="https://issues.apache.org/jira/browse/HBASE-13893">HBASE-13893</a>
2001   */
2002  @Deprecated
2003  public HTableDescriptor createTableDescriptor(final String name,
2004      final int minVersions, final int versions, final int ttl, KeepDeletedCells keepDeleted) {
2005    return this.createTableDescriptor(TableName.valueOf(name), minVersions, versions, ttl,
2006        keepDeleted);
2007  }
2008
2009  /**
   * Create a table descriptor for a table of name <code>name</code>.
   * @param name Name to give table.
   * @return Table descriptor.
2013   * @deprecated since 2.0.0 and will be removed in 3.0.0. Use
2014   *   {@link #createTableDescriptor(TableName, int, int, int, KeepDeletedCells)} instead.
2015   * @see #createTableDescriptor(TableName, int, int, int, KeepDeletedCells)
2016   * @see <a href="https://issues.apache.org/jira/browse/HBASE-13893">HBASE-13893</a>
2017   */
2018  @Deprecated
2019  public HTableDescriptor createTableDescriptor(final String name) {
2020    return createTableDescriptor(TableName.valueOf(name),  HColumnDescriptor.DEFAULT_MIN_VERSIONS,
2021        MAXVERSIONS, HConstants.FOREVER, HColumnDescriptor.DEFAULT_KEEP_DELETED);
2022  }
2023
2024  public HTableDescriptor createTableDescriptor(final TableName name,
2025      final int minVersions, final int versions, final int ttl, KeepDeletedCells keepDeleted) {
2026    HTableDescriptor htd = new HTableDescriptor(name);
2027    for (byte[] cfName : new byte[][]{ fam1, fam2, fam3 }) {
2028      HColumnDescriptor hcd = new HColumnDescriptor(cfName)
2029          .setMinVersions(minVersions)
2030          .setMaxVersions(versions)
2031          .setKeepDeletedCells(keepDeleted)
2032          .setBlockCacheEnabled(false)
2033          .setTimeToLive(ttl);
2034      if (isNewVersionBehaviorEnabled()) {
        hcd.setNewVersionBehavior(true);
2036      }
2037      htd.addFamily(hcd);
2038    }
2039    return htd;
2040  }
2041
2042  /**
   * Create a table descriptor for a table of name <code>name</code>.
   * @param name Name to give table.
   * @return Table descriptor.
2046   */
2047  public HTableDescriptor createTableDescriptor(final TableName name) {
2048    return createTableDescriptor(name,  HColumnDescriptor.DEFAULT_MIN_VERSIONS,
2049        MAXVERSIONS, HConstants.FOREVER, HColumnDescriptor.DEFAULT_KEEP_DELETED);
2050  }
2051
2052  public HTableDescriptor createTableDescriptor(final TableName tableName,
2053      byte[] family) {
2054    return createTableDescriptor(tableName, new byte[][] {family}, 1);
2055  }
2056
2057  public HTableDescriptor createTableDescriptor(final TableName tableName,
2058      byte[][] families, int maxVersions) {
2059    HTableDescriptor desc = new HTableDescriptor(tableName);
2060    for (byte[] family : families) {
2061      HColumnDescriptor hcd = new HColumnDescriptor(family)
2062          .setMaxVersions(maxVersions);
2063      if (isNewVersionBehaviorEnabled()) {
        hcd.setNewVersionBehavior(true);
2065      }
2066      desc.addFamily(hcd);
2067    }
2068    return desc;
2069  }
2070
2071  /**
2072   * Create an HRegion that writes to the local tmp dirs
2073   * @param desc a table descriptor indicating which table the region belongs to
2074   * @param startKey the start boundary of the region
2075   * @param endKey the end boundary of the region
2076   * @return a region that writes to local dir for testing
2077   * @throws IOException
2078   */
2079  public HRegion createLocalHRegion(TableDescriptor desc, byte [] startKey,
2080      byte [] endKey)
2081  throws IOException {
2082    HRegionInfo hri = new HRegionInfo(desc.getTableName(), startKey, endKey);
2083    return createLocalHRegion(hri, desc);
2084  }
2085
2086  /**
2087   * Create an HRegion that writes to the local tmp dirs. Creates the WAL for you. Be sure to call
2088   * {@link HBaseTestingUtility#closeRegionAndWAL(HRegion)} when you're finished with it.
2089   */
2090  public HRegion createLocalHRegion(RegionInfo info, TableDescriptor desc) throws IOException {
2091    return createRegionAndWAL(info, getDataTestDir(), getConfiguration(), desc);
2092  }
2093
2094  /**
2095   * Create an HRegion that writes to the local tmp dirs with specified wal
2096   * @param info regioninfo
2097   * @param conf configuration
2098   * @param desc table descriptor
2099   * @param wal wal for this region.
2100   * @return created hregion
2101   * @throws IOException
2102   */
2103  public HRegion createLocalHRegion(RegionInfo info, Configuration conf, TableDescriptor desc,
2104      WAL wal) throws IOException {
2105    return HRegion.createHRegion(info, getDataTestDir(), conf, desc, wal);
2106  }
2107
2108  /**
2109   * Create an HRegion that writes to the local tmp dirs with specified wal
2110   * @param info regioninfo
   * @param conf configuration
2112   * @param desc table descriptor
2113   * @param wal wal for this region.
2114   * @return created hregion
2115   * @throws IOException
2116   */
2117  public HRegion createLocalHRegion(HRegionInfo info, Configuration conf, HTableDescriptor desc,
2118      WAL wal) throws IOException {
2119    return HRegion.createHRegion(info, getDataTestDir(), conf, desc, wal);
2120  }
2121
2122  /**
2123   * @param tableName the name of the table
2124   * @param startKey the start key of the region
2125   * @param stopKey the stop key of the region
2126   * @param callingMethod the name of the calling method probably a test method
2127   * @param conf the configuration to use
2128   * @param isReadOnly {@code true} if the table is read only, {@code false} otherwise
2129   * @param families the column families to use
2130   * @throws IOException if an IO problem is encountered
2131   * @return A region on which you must call {@link HBaseTestingUtility#closeRegionAndWAL(HRegion)}
2132   *         when done.
2133   * @deprecated since 2.0.0 and will be removed in 3.0.0. Use
2134   *   {@link #createLocalHRegion(TableName, byte[], byte[], boolean, Durability, WAL, byte[]...)}
2135   *   instead.
2136   * @see #createLocalHRegion(TableName, byte[], byte[], boolean, Durability, WAL, byte[]...)
2137   * @see <a href="https://issues.apache.org/jira/browse/HBASE-13893">HBASE-13893</a>
2138   */
2139  @Deprecated
2140  public HRegion createLocalHRegion(byte[] tableName, byte[] startKey, byte[] stopKey,
2141      String callingMethod, Configuration conf, boolean isReadOnly, Durability durability,
2142      WAL wal, byte[]... families) throws IOException {
2143    return createLocalHRegion(TableName.valueOf(tableName), startKey, stopKey, conf, isReadOnly,
2144      durability, wal, families);
2145  }
2146
2147  /**
2148   * @param tableName
2149   * @param startKey
2150   * @param stopKey
2151   * @param isReadOnly
2152   * @param families
2153   * @return A region on which you must call
2154   * {@link HBaseTestingUtility#closeRegionAndWAL(HRegion)} when done.
2155   * @throws IOException
2156   */
2157  public HRegion createLocalHRegion(TableName tableName, byte[] startKey, byte[] stopKey,
2158      Configuration conf, boolean isReadOnly, Durability durability, WAL wal, byte[]... families)
2159      throws IOException {
    return createLocalHRegionWithInMemoryFlags(tableName, startKey, stopKey, conf, isReadOnly,
2161        durability, wal, null, families);
2162  }
2163
2164  public HRegion createLocalHRegionWithInMemoryFlags(TableName tableName, byte[] startKey,
2165      byte[] stopKey, Configuration conf,
2166      boolean isReadOnly, Durability durability, WAL wal, boolean[] compactedMemStore,
2167      byte[]... families)
2168      throws IOException {
2169    HTableDescriptor htd = new HTableDescriptor(tableName);
2170    htd.setReadOnly(isReadOnly);
    int i = 0;
    for (byte[] family : families) {
      HColumnDescriptor hcd = new HColumnDescriptor(family);
      if (compactedMemStore != null && i < compactedMemStore.length) {
        hcd.setInMemoryCompaction(MemoryCompactionPolicy.BASIC);
      } else {
        hcd.setInMemoryCompaction(MemoryCompactionPolicy.NONE);
      }
2180      i++;
      // Keep all cell versions.
2182      hcd.setMaxVersions(Integer.MAX_VALUE);
2183      htd.addFamily(hcd);
2184    }
2185    htd.setDurability(durability);
2186    HRegionInfo info = new HRegionInfo(htd.getTableName(), startKey, stopKey, false);
2187    return createLocalHRegion(info, conf, htd, wal);
2188  }
2189
2190  //
2191  // ==========================================================================
2192
2193  /**
2194   * Provide an existing table name to truncate.
2195   * Scans the table and issues a delete for each row read.
2196   * @param tableName existing table
   * @return Table to the emptied table
2198   * @throws IOException
2199   */
2200  public Table deleteTableData(TableName tableName) throws IOException {
2201    Table table = getConnection().getTable(tableName);
    try (ResultScanner resScan = table.getScanner(new Scan())) {
      for (Result res : resScan) {
        table.delete(new Delete(res.getRow()));
      }
    }
2210    return table;
2211  }
2212
2213  /**
2214   * Truncate a table using the admin command.
2215   * Effectively disables, deletes, and recreates the table.
2216   * @param tableName table which must exist.
2217   * @param preserveRegions keep the existing split points
2218   * @return HTable for the new table
2219   */
2220  public Table truncateTable(final TableName tableName, final boolean preserveRegions) throws
2221      IOException {
2222    Admin admin = getAdmin();
2223    if (!admin.isTableDisabled(tableName)) {
2224      admin.disableTable(tableName);
2225    }
2226    admin.truncateTable(tableName, preserveRegions);
2227    return getConnection().getTable(tableName);
2228  }
2229
2230  /**
2231   * Truncate a table using the admin command.
2232   * Effectively disables, deletes, and recreates the table.
2233   * For previous behavior of issuing row deletes, see
2234   * deleteTableData.
2235   * Expressly does not preserve regions of existing table.
2236   * @param tableName table which must exist.
2237   * @return HTable for the new table
2238   */
2239  public Table truncateTable(final TableName tableName) throws IOException {
2240    return truncateTable(tableName, false);
2241  }
2242
2243  /**
2244   * Load table with rows from 'aaa' to 'zzz'.
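   * <p>One row is written per three-letter key, {@code 26^3 == 17576} rows in total (see
   * {@link #ROWS}); each cell value defaults to the row key. Sketch:
   * <pre>{@code
   * int loaded = loadTable(table, Bytes.toBytes("cf")); // returns 17576
   * }</pre>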
2245   * @param t Table
2246   * @param f Family
2247   * @return Count of rows loaded.
2248   * @throws IOException
2249   */
2250  public int loadTable(final Table t, final byte[] f) throws IOException {
2251    return loadTable(t, new byte[][] {f});
2252  }
2253
2254  /**
2255   * Load table with rows from 'aaa' to 'zzz'.
2256   * @param t Table
   * @param f Family
   * @param writeToWAL if false, the puts use {@link Durability#SKIP_WAL}
2258   * @return Count of rows loaded.
2259   * @throws IOException
2260   */
2261  public int loadTable(final Table t, final byte[] f, boolean writeToWAL) throws IOException {
2262    return loadTable(t, new byte[][] {f}, null, writeToWAL);
2263  }
2264
2265  /**
2266   * Load table of multiple column families with rows from 'aaa' to 'zzz'.
2267   * @param t Table
2268   * @param f Array of Families to load
2269   * @return Count of rows loaded.
2270   * @throws IOException
2271   */
2272  public int loadTable(final Table t, final byte[][] f) throws IOException {
2273    return loadTable(t, f, null);
2274  }
2275
2276  /**
2277   * Load table of multiple column families with rows from 'aaa' to 'zzz'.
2278   * @param t Table
2279   * @param f Array of Families to load
2280   * @param value the values of the cells. If null is passed, the row key is used as value
2281   * @return Count of rows loaded.
2282   * @throws IOException
2283   */
2284  public int loadTable(final Table t, final byte[][] f, byte[] value) throws IOException {
2285    return loadTable(t, f, value, true);
2286  }
2287
2288  /**
2289   * Load table of multiple column families with rows from 'aaa' to 'zzz'.
2290   * @param t Table
2291   * @param f Array of Families to load
   * @param value the values of the cells. If null is passed, the row key is used as value
   * @param writeToWAL if false, the puts use {@link Durability#SKIP_WAL}
2293   * @return Count of rows loaded.
2294   * @throws IOException
2295   */
2296  public int loadTable(final Table t, final byte[][] f, byte[] value,
2297      boolean writeToWAL) throws IOException {
2298    List<Put> puts = new ArrayList<>();
2299    for (byte[] row : HBaseTestingUtility.ROWS) {
2300      Put put = new Put(row);
2301      put.setDurability(writeToWAL ? Durability.USE_DEFAULT : Durability.SKIP_WAL);
2302      for (int i = 0; i < f.length; i++) {
2303        byte[] value1 = value != null ? value : row;
2304        put.addColumn(f[i], f[i], value1);
2305      }
2306      puts.add(put);
2307    }
2308    t.put(puts);
2309    return puts.size();
2310  }
2311
2312  /** A tracker for tracking and validating table rows
2313   * generated with {@link HBaseTestingUtility#loadTable(Table, byte[])}
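   *
   * <p>Typical usage sketch: record every row returned by a scan, then check coverage:
   * <pre>{@code
   * SeenRowTracker tracker = new SeenRowTracker(Bytes.toBytes("aaa"), Bytes.toBytes("zzz"));
   * for (Result r : scanner) {
   *   tracker.addRow(r.getRow());
   * }
   * tracker.validate(); // throws RuntimeException on any missing or duplicated row
   * }</pre>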
2314   */
2315  public static class SeenRowTracker {
2316    int dim = 'z' - 'a' + 1;
2317    int[][][] seenRows = new int[dim][dim][dim]; //count of how many times the row is seen
2318    byte[] startRow;
2319    byte[] stopRow;
2320
2321    public SeenRowTracker(byte[] startRow, byte[] stopRow) {
2322      this.startRow = startRow;
2323      this.stopRow = stopRow;
2324    }
2325
2326    void reset() {
2327      for (byte[] row : ROWS) {
2328        seenRows[i(row[0])][i(row[1])][i(row[2])] = 0;
2329      }
2330    }
2331
2332    int i(byte b) {
2333      return b - 'a';
2334    }
2335
2336    public void addRow(byte[] row) {
2337      seenRows[i(row[0])][i(row[1])][i(row[2])]++;
2338    }
2339
2340    /** Validate that all the rows between startRow and stopRow are seen exactly once, and
2341     * all other rows none
2342     */
2343    public void validate() {
2344      for (byte b1 = 'a'; b1 <= 'z'; b1++) {
2345        for (byte b2 = 'a'; b2 <= 'z'; b2++) {
2346          for (byte b3 = 'a'; b3 <= 'z'; b3++) {
2347            int count = seenRows[i(b1)][i(b2)][i(b3)];
2348            int expectedCount = 0;
2349            if (Bytes.compareTo(new byte[] {b1,b2,b3}, startRow) >= 0
2350                && Bytes.compareTo(new byte[] {b1,b2,b3}, stopRow) < 0) {
2351              expectedCount = 1;
2352            }
2353            if (count != expectedCount) {
2354              String row = new String(new byte[] {b1,b2,b3}, StandardCharsets.UTF_8);
2355              throw new RuntimeException("Row:" + row + " has a seen count of " + count + " " +
2356                  "instead of " + expectedCount);
2357            }
2358          }
2359        }
2360      }
2361    }
2362  }
2363
2364  public int loadRegion(final HRegion r, final byte[] f) throws IOException {
2365    return loadRegion(r, f, false);
2366  }
2367
2368  public int loadRegion(final Region r, final byte[] f) throws IOException {
2369    return loadRegion((HRegion)r, f);
2370  }
2371
2372  /**
2373   * Load region with rows from 'aaa' to 'zzz'.
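   * <p>Puts skip the WAL; on {@link RegionTooBusyException} each write is retried with
   * exponential backoff (10ms doubling up to 1s), so the call blocks until every row lands.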
2374   * @param r Region
2375   * @param f Family
2376   * @param flush flush the cache if true
2377   * @return Count of rows loaded.
2378   * @throws IOException
2379   */
2380  public int loadRegion(final HRegion r, final byte[] f, final boolean flush)
2381  throws IOException {
2382    byte[] k = new byte[3];
2383    int rowCount = 0;
2384    for (byte b1 = 'a'; b1 <= 'z'; b1++) {
2385      for (byte b2 = 'a'; b2 <= 'z'; b2++) {
2386        for (byte b3 = 'a'; b3 <= 'z'; b3++) {
2387          k[0] = b1;
2388          k[1] = b2;
2389          k[2] = b3;
          Put put = new Put(k);
          // Skip the WAL for speed; this bulk test load doesn't need durability.
          put.setDurability(Durability.SKIP_WAL);
          put.addColumn(f, null, k);
2396          int preRowCount = rowCount;
2397          int pause = 10;
2398          int maxPause = 1000;
2399          while (rowCount == preRowCount) {
2400            try {
2401              r.put(put);
2402              rowCount++;
2403            } catch (RegionTooBusyException e) {
2404              pause = (pause * 2 >= maxPause) ? maxPause : pause * 2;
2405              Threads.sleep(pause);
2406            }
2407          }
2408        }
2409      }
2410      if (flush) {
2411        r.flush(true);
2412      }
2413    }
2414    return rowCount;
2415  }
2416
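  /**
   * Load rows whose keys are the decimal strings of {@code [startRow, endRow)}; each row key
   * doubles as the cell value.
   */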
2417  public void loadNumericRows(final Table t, final byte[] f, int startRow, int endRow)
2418      throws IOException {
2419    for (int i = startRow; i < endRow; i++) {
2420      byte[] data = Bytes.toBytes(String.valueOf(i));
2421      Put put = new Put(data);
2422      put.addColumn(f, null, data);
2423      t.put(put);
2424    }
2425  }
2426
2427  public void loadRandomRows(final Table t, final byte[] f, int rowSize, int totalRows)
2428      throws IOException {
2429    Random r = new Random();
2430    byte[] row = new byte[rowSize];
2431    for (int i = 0; i < totalRows; i++) {
2432      r.nextBytes(row);
2433      Put put = new Put(row);
2434      put.addColumn(f, new byte[]{0}, new byte[]{0});
2435      t.put(put);
2436    }
2437  }
2438
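  /**
   * Read back rows written by {@link #loadNumericRows(Table, byte[], int, int)} from the
   * given replica using {@link Consistency#TIMELINE} reads, asserting that each row is
   * present exactly once with its expected value.
   */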
2439  public void verifyNumericRows(Table table, final byte[] f, int startRow, int endRow,
2440      int replicaId)
2441      throws IOException {
2442    for (int i = startRow; i < endRow; i++) {
2443      String failMsg = "Failed verification of row :" + i;
2444      byte[] data = Bytes.toBytes(String.valueOf(i));
2445      Get get = new Get(data);
2446      get.setReplicaId(replicaId);
2447      get.setConsistency(Consistency.TIMELINE);
2448      Result result = table.get(get);
2449      assertTrue(failMsg, result.containsColumn(f, null));
2450      assertEquals(failMsg, 1, result.getColumnCells(f, null).size());
2451      Cell cell = result.getColumnLatestCell(f, null);
2452      assertTrue(failMsg,
2453        Bytes.equals(data, 0, data.length, cell.getValueArray(), cell.getValueOffset(),
2454          cell.getValueLength()));
2455    }
2456  }
2457
2458  public void verifyNumericRows(Region region, final byte[] f, int startRow, int endRow)
2459      throws IOException {
2460    verifyNumericRows((HRegion)region, f, startRow, endRow);
2461  }
2462
2463  public void verifyNumericRows(HRegion region, final byte[] f, int startRow, int endRow)
2464      throws IOException {
2465    verifyNumericRows(region, f, startRow, endRow, true);
2466  }
2467
2468  public void verifyNumericRows(Region region, final byte[] f, int startRow, int endRow,
2469      final boolean present) throws IOException {
2470    verifyNumericRows((HRegion)region, f, startRow, endRow, present);
2471  }
2472
2473  public void verifyNumericRows(HRegion region, final byte[] f, int startRow, int endRow,
2474      final boolean present) throws IOException {
2475    for (int i = startRow; i < endRow; i++) {
2476      String failMsg = "Failed verification of row :" + i;
2477      byte[] data = Bytes.toBytes(String.valueOf(i));
2478      Result result = region.get(new Get(data));
2479
2480      boolean hasResult = result != null && !result.isEmpty();
2481      assertEquals(failMsg + result, present, hasResult);
2482      if (!present) continue;
2483
2484      assertTrue(failMsg, result.containsColumn(f, null));
2485      assertEquals(failMsg, 1, result.getColumnCells(f, null).size());
2486      Cell cell = result.getColumnLatestCell(f, null);
2487      assertTrue(failMsg,
2488        Bytes.equals(data, 0, data.length, cell.getValueArray(), cell.getValueOffset(),
2489          cell.getValueLength()));
2490    }
2491  }
2492
2493  public void deleteNumericRows(final Table t, final byte[] f, int startRow, int endRow)
2494      throws IOException {
2495    for (int i = startRow; i < endRow; i++) {
2496      byte[] data = Bytes.toBytes(String.valueOf(i));
2497      Delete delete = new Delete(data);
2498      delete.addFamily(f);
2499      t.delete(delete);
2500    }
2501  }
2502
2503  /**
2504   * Return the number of rows in the given table.
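   * <p>For example (sketch), after a full {@link #loadTable(Table, byte[])} one would expect
   * {@code countRows(table)} to return 17576.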
2505   * @param table to count rows
2506   * @return count of rows
2507   */
2508  public int countRows(final Table table) throws IOException {
2509    return countRows(table, new Scan());
2510  }
2511
2512  public int countRows(final Table table, final Scan scan) throws IOException {
2513    try (ResultScanner results = table.getScanner(scan)) {
2514      int count = 0;
2515      while (results.next() != null) {
2516        count++;
2517      }
2518      return count;
2519    }
2520  }
2521
2522  public int countRows(final Table table, final byte[]... families) throws IOException {
2523    Scan scan = new Scan();
2524    for (byte[] family: families) {
2525      scan.addFamily(family);
2526    }
2527    return countRows(table, scan);
2528  }
2529
2530  /**
2531   * Return the number of rows in the given table.
2532   */
2533  public int countRows(final TableName tableName) throws IOException {
2534    Table table = getConnection().getTable(tableName);
2535    try {
2536      return countRows(table);
2537    } finally {
2538      table.close();
2539    }
2540  }
2541
2542  public int countRows(final Region region) throws IOException {
2543    return countRows(region, new Scan());
2544  }
2545
2546  public int countRows(final Region region, final Scan scan) throws IOException {
2547    InternalScanner scanner = region.getScanner(scan);
2548    try {
2549      return countRows(scanner);
2550    } finally {
2551      scanner.close();
2552    }
2553  }
2554
2555  public int countRows(final InternalScanner scanner) throws IOException {
2556    int scannedCount = 0;
2557    List<Cell> results = new ArrayList<>();
2558    boolean hasMore = true;
2559    while (hasMore) {
2560      hasMore = scanner.next(results);
2561      scannedCount += results.size();
2562      results.clear();
2563    }
2564    return scannedCount;
2565  }
2566
2567  /**
2568   * Return an md5 digest of the entire contents of a table.
2569   */
2570  public String checksumRows(final Table table) throws Exception {
    MessageDigest digest = MessageDigest.getInstance("MD5");
    try (ResultScanner results = table.getScanner(new Scan())) {
      for (Result res : results) {
        digest.update(res.getRow());
      }
    }
    // MessageDigest#toString does not render the digest; return the hex of the digest bytes.
    return Bytes.toHex(digest.digest());
2580  }
2581
2582  /** All the row values for the data loaded by {@link #loadTable(Table, byte[])} */
2583  public static final byte[][] ROWS = new byte[(int) Math.pow('z' - 'a' + 1, 3)][3]; // ~52KB
2584  static {
2585    int i = 0;
2586    for (byte b1 = 'a'; b1 <= 'z'; b1++) {
2587      for (byte b2 = 'a'; b2 <= 'z'; b2++) {
2588        for (byte b3 = 'a'; b3 <= 'z'; b3++) {
2589          ROWS[i][0] = b1;
2590          ROWS[i][1] = b2;
2591          ROWS[i][2] = b3;
2592          i++;
2593        }
2594      }
2595    }
2596  }
2597
2598  public static final byte[][] KEYS = {
2599    HConstants.EMPTY_BYTE_ARRAY, Bytes.toBytes("bbb"),
2600    Bytes.toBytes("ccc"), Bytes.toBytes("ddd"), Bytes.toBytes("eee"),
2601    Bytes.toBytes("fff"), Bytes.toBytes("ggg"), Bytes.toBytes("hhh"),
2602    Bytes.toBytes("iii"), Bytes.toBytes("jjj"), Bytes.toBytes("kkk"),
2603    Bytes.toBytes("lll"), Bytes.toBytes("mmm"), Bytes.toBytes("nnn"),
2604    Bytes.toBytes("ooo"), Bytes.toBytes("ppp"), Bytes.toBytes("qqq"),
2605    Bytes.toBytes("rrr"), Bytes.toBytes("sss"), Bytes.toBytes("ttt"),
2606    Bytes.toBytes("uuu"), Bytes.toBytes("vvv"), Bytes.toBytes("www"),
2607    Bytes.toBytes("xxx"), Bytes.toBytes("yyy")
2608  };
2609
2610  public static final byte[][] KEYS_FOR_HBA_CREATE_TABLE = {
2611      Bytes.toBytes("bbb"),
2612      Bytes.toBytes("ccc"), Bytes.toBytes("ddd"), Bytes.toBytes("eee"),
2613      Bytes.toBytes("fff"), Bytes.toBytes("ggg"), Bytes.toBytes("hhh"),
2614      Bytes.toBytes("iii"), Bytes.toBytes("jjj"), Bytes.toBytes("kkk"),
2615      Bytes.toBytes("lll"), Bytes.toBytes("mmm"), Bytes.toBytes("nnn"),
2616      Bytes.toBytes("ooo"), Bytes.toBytes("ppp"), Bytes.toBytes("qqq"),
2617      Bytes.toBytes("rrr"), Bytes.toBytes("sss"), Bytes.toBytes("ttt"),
2618      Bytes.toBytes("uuu"), Bytes.toBytes("vvv"), Bytes.toBytes("www"),
2619      Bytes.toBytes("xxx"), Bytes.toBytes("yyy"), Bytes.toBytes("zzz")
2620  };
2621
2622  /**
2623   * Create rows in hbase:meta for regions of the specified table with the specified
2624   * start keys.  The first startKey should be a 0 length byte array if you
2625   * want to form a proper range of regions.
2626   * @param conf
2627   * @param htd
2628   * @param startKeys
2629   * @return list of region info for regions added to meta
2630   * @throws IOException
2631   * @deprecated since 2.0 version and will be removed in 3.0 version.
2632   *             use {@link #createMultiRegionsInMeta(Configuration, TableDescriptor, byte[][])}
2633   */
2634  @Deprecated
2635  public List<HRegionInfo> createMultiRegionsInMeta(final Configuration conf,
2636      final HTableDescriptor htd, byte [][] startKeys) throws IOException {
2637    return createMultiRegionsInMeta(conf, (TableDescriptor) htd, startKeys)
2638        .stream().map(ImmutableHRegionInfo::new).collect(Collectors.toList());
2639  }

  /**
2641   * Create rows in hbase:meta for regions of the specified table with the specified
2642   * start keys.  The first startKey should be a 0 length byte array if you
2643   * want to form a proper range of regions.
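   * <p>For example (sketch; {@code conf} and {@code htd} assumed in scope), three regions
   * covering the whole key space, with the last region wrapping back to the empty end key:
   * <pre>{@code
   * byte[][] startKeys = { HConstants.EMPTY_BYTE_ARRAY, Bytes.toBytes("mmm"),
   *   Bytes.toBytes("ttt") };
   * List<RegionInfo> regions = createMultiRegionsInMeta(conf, htd, startKeys);
   * }</pre>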
2644   * @param conf
2645   * @param htd
2646   * @param startKeys
2647   * @return list of region info for regions added to meta
2648   * @throws IOException
2649   */
2650  public List<RegionInfo> createMultiRegionsInMeta(final Configuration conf,
2651      final TableDescriptor htd, byte [][] startKeys)
2652  throws IOException {
2653    Table meta = getConnection().getTable(TableName.META_TABLE_NAME);
2654    Arrays.sort(startKeys, Bytes.BYTES_COMPARATOR);
2655    List<RegionInfo> newRegions = new ArrayList<>(startKeys.length);
2656    MetaTableAccessor
2657        .updateTableState(getConnection(), htd.getTableName(), TableState.State.ENABLED);
2658    // add custom ones
2659    for (int i = 0; i < startKeys.length; i++) {
2660      int j = (i + 1) % startKeys.length;
2661      RegionInfo hri = RegionInfoBuilder.newBuilder(htd.getTableName())
2662          .setStartKey(startKeys[i])
2663          .setEndKey(startKeys[j])
2664          .build();
2665      MetaTableAccessor.addRegionsToMeta(getConnection(), Collections.singletonList(hri), 1);
2666      newRegions.add(hri);
2667    }
2668
2669    meta.close();
2670    return newRegions;
2671  }
2672
2673  /**
2674   * Create an unmanaged WAL. Be sure to close it when you're through.
2675   */
2676  public static WAL createWal(final Configuration conf, final Path rootDir, final RegionInfo hri)
2677      throws IOException {
    // The WAL subsystem will use the default rootDir rather than the passed in rootDir
    // unless we pass it along via the conf.
2680    Configuration confForWAL = new Configuration(conf);
2681    confForWAL.set(HConstants.HBASE_DIR, rootDir.toString());
2682    return new WALFactory(confForWAL, "hregion-" + RandomStringUtils.randomNumeric(8)).getWAL(hri);
2683  }
2684
2685
2686  /**
   * Create a region with its own WAL. Be sure to call
2688   * {@link HBaseTestingUtility#closeRegionAndWAL(HRegion)} to clean up all resources.
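   * <p>A minimal sketch of expected usage (names illustrative):
   * <pre>
   * HRegion region = HBaseTestingUtility.createRegionAndWAL(info, rootDir, conf, htd);
   * try {
   *   // operate on the region ...
   * } finally {
   *   HBaseTestingUtility.closeRegionAndWAL(region);
   * }
   * </pre>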
2689   */
2690  public static HRegion createRegionAndWAL(final RegionInfo info, final Path rootDir,
2691      final Configuration conf, final TableDescriptor htd) throws IOException {
2692    return createRegionAndWAL(info, rootDir, conf, htd, true);
2693  }
2694
2695  /**
   * Create a region with its own WAL. Be sure to call
2697   * {@link HBaseTestingUtility#closeRegionAndWAL(HRegion)} to clean up all resources.
2698   */
2699  public static HRegion createRegionAndWAL(final RegionInfo info, final Path rootDir,
2700      final Configuration conf, final TableDescriptor htd, BlockCache blockCache)
2701      throws IOException {
2702    HRegion region = createRegionAndWAL(info, rootDir, conf, htd, false);
2703    region.setBlockCache(blockCache);
2704    region.initialize();
2705    return region;
2706  }

  /**
   * Create a region with its own WAL. Be sure to call
2709   * {@link HBaseTestingUtility#closeRegionAndWAL(HRegion)} to clean up all resources.
2710   */
2711  public static HRegion createRegionAndWAL(final RegionInfo info, final Path rootDir,
2712      final Configuration conf, final TableDescriptor htd, MobFileCache mobFileCache)
2713      throws IOException {
2714    HRegion region = createRegionAndWAL(info, rootDir, conf, htd, false);
2715    region.setMobFileCache(mobFileCache);
2716    region.initialize();
2717    return region;
2718  }
2719
2720  /**
   * Create a region with its own WAL. Be sure to call
2722   * {@link HBaseTestingUtility#closeRegionAndWAL(HRegion)} to clean up all resources.
2723   */
2724  public static HRegion createRegionAndWAL(final RegionInfo info, final Path rootDir,
2725      final Configuration conf, final TableDescriptor htd, boolean initialize)
2726      throws IOException {
2727    ChunkCreator.initialize(MemStoreLAB.CHUNK_SIZE_DEFAULT, false, 0, 0,
2728      0, null, MemStoreLAB.INDEX_CHUNK_SIZE_PERCENTAGE_DEFAULT);
2729    WAL wal = createWal(conf, rootDir, info);
2730    return HRegion.createHRegion(info, rootDir, conf, htd, wal, initialize);
2731  }
2732
2733  /**
2734   * Returns all rows from the hbase:meta table.
2735   *
2736   * @throws IOException When reading the rows fails.
2737   */
2738  public List<byte[]> getMetaTableRows() throws IOException {
2739    // TODO: Redo using MetaTableAccessor class
2740    Table t = getConnection().getTable(TableName.META_TABLE_NAME);
2741    List<byte[]> rows = new ArrayList<>();
2742    ResultScanner s = t.getScanner(new Scan());
2743    for (Result result : s) {
2744      LOG.info("getMetaTableRows: row -> " +
2745        Bytes.toStringBinary(result.getRow()));
2746      rows.add(result.getRow());
2747    }
2748    s.close();
2749    t.close();
2750    return rows;
2751  }
2752
2753  /**
2754   * Returns all rows from the hbase:meta table for a given user table
2755   *
2756   * @throws IOException When reading the rows fails.
2757   */
2758  public List<byte[]> getMetaTableRows(TableName tableName) throws IOException {
2759    // TODO: Redo using MetaTableAccessor.
2760    Table t = getConnection().getTable(TableName.META_TABLE_NAME);
2761    List<byte[]> rows = new ArrayList<>();
2762    ResultScanner s = t.getScanner(new Scan());
2763    for (Result result : s) {
2764      RegionInfo info = MetaTableAccessor.getRegionInfo(result);
2765      if (info == null) {
2766        LOG.error("No region info for row " + Bytes.toString(result.getRow()));
2767        // TODO figure out what to do for this new hosed case.
2768        continue;
2769      }
2770
2771      if (info.getTable().equals(tableName)) {
2772        LOG.info("getMetaTableRows: row -> " +
2773            Bytes.toStringBinary(result.getRow()) + info);
2774        rows.add(result.getRow());
2775      }
2776    }
2777    s.close();
2778    t.close();
2779    return rows;
2780  }
2781
2782  /**
2783   * Returns all regions of the specified table
2784   *
2785   * @param tableName the table name
2786   * @return all regions of the specified table
2787   * @throws IOException when getting the regions fails.
2788   */
2789  private List<RegionInfo> getRegions(TableName tableName) throws IOException {
2790    try (Admin admin = getConnection().getAdmin()) {
2791      return admin.getRegions(tableName);
2792    }
2793  }
2794
  /**
   * Find a region server other than the one passed in.
   * @param rs region server to exclude
   * @return another region server, or null if there is none
   */
2800  public HRegionServer getOtherRegionServer(HRegionServer rs) {
2801    for (JVMClusterUtil.RegionServerThread rst :
2802      getMiniHBaseCluster().getRegionServerThreads()) {
      if (rst.getRegionServer() != rs) {
2804        return rst.getRegionServer();
2805      }
2806    }
2807    return null;
2808  }
2809
2810  /**
   * Tool to get the reference to the region server object that holds the first region of the
   * specified user table.
   * @param tableName user table to lookup in hbase:meta
   * @return region server that holds it, null if the row doesn't exist
   * @throws IOException if the regions cannot be listed
   * @throws InterruptedException if interrupted while waiting between retries
2817   */
2818  public HRegionServer getRSForFirstRegionInTable(TableName tableName)
2819      throws IOException, InterruptedException {
2820    List<RegionInfo> regions = getRegions(tableName);
2821    if (regions == null || regions.isEmpty()) {
2822      return null;
2823    }
2824    LOG.debug("Found " + regions.size() + " regions for table " +
2825        tableName);
2826
2827    byte[] firstRegionName = regions.stream()
2828        .filter(r -> !r.isOffline())
2829        .map(RegionInfo::getRegionName)
2830        .findFirst()
2831        .orElseThrow(() -> new IOException("online regions not found in table " + tableName));
2832
2833    LOG.debug("firstRegionName=" + Bytes.toString(firstRegionName));
2834    long pause = getConfiguration().getLong(HConstants.HBASE_CLIENT_PAUSE,
2835      HConstants.DEFAULT_HBASE_CLIENT_PAUSE);
2836    int numRetries = getConfiguration().getInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER,
2837      HConstants.DEFAULT_HBASE_CLIENT_RETRIES_NUMBER);
    // HBASE_CLIENT_PAUSE is in milliseconds, so sleep between retries in milliseconds.
    RetryCounter retrier = new RetryCounter(numRetries + 1, (int) pause, TimeUnit.MILLISECONDS);
    while (retrier.shouldRetry()) {
2840      int index = getMiniHBaseCluster().getServerWith(firstRegionName);
2841      if (index != -1) {
2842        return getMiniHBaseCluster().getRegionServerThreads().get(index).getRegionServer();
2843      }
2844      // Came back -1.  Region may not be online yet.  Sleep a while.
2845      retrier.sleepUntilNextRetry();
2846    }
2847    return null;
2848  }
2849
2850  /**
   * Starts a <code>MiniMRCluster</code> with a default number of
   * <code>TaskTracker</code>s.
2853   *
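   * <p>Example (illustrative; {@code util} is this testing utility):
   * <pre>
   * MiniMRCluster mr = util.startMiniMapReduceCluster();
   * try {
   *   // run MapReduce jobs against the mini cluster ...
   * } finally {
   *   util.shutdownMiniMapReduceCluster();
   * }
   * </pre>
   *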
2854   * @throws IOException When starting the cluster fails.
2855   */
2856  public MiniMRCluster startMiniMapReduceCluster() throws IOException {
2857    // Set a very high max-disk-utilization percentage to avoid the NodeManagers from failing.
2858    conf.setIfUnset(
2859        "yarn.nodemanager.disk-health-checker.max-disk-utilization-per-disk-percentage",
2860        "99.0");
2861    startMiniMapReduceCluster(2);
2862    return mrCluster;
2863  }
2864
2865  /**
   * TaskLog has a bug where changing the hadoop.log.dir system property
   * will not change its internal static LOG_DIR variable.
2868   */
2869  private void forceChangeTaskLogDir() {
2870    Field logDirField;
2871    try {
2872      logDirField = TaskLog.class.getDeclaredField("LOG_DIR");
2873      logDirField.setAccessible(true);
2874
2875      Field modifiersField = Field.class.getDeclaredField("modifiers");
2876      modifiersField.setAccessible(true);
2877      modifiersField.setInt(logDirField, logDirField.getModifiers() & ~Modifier.FINAL);
2878
2879      logDirField.set(null, new File(hadoopLogDir, "userlogs"));
    } catch (SecurityException | NoSuchFieldException | IllegalArgumentException
        | IllegalAccessException e) {
      throw new RuntimeException(e);
    }
2890  }
2891
2892  /**
2893   * Starts a <code>MiniMRCluster</code>. Call {@link #setFileSystemURI(String)} to use a different
2894   * filesystem.
   * @param servers  The number of <code>TaskTracker</code>s to start.
2896   * @throws IOException When starting the cluster fails.
2897   */
2898  private void startMiniMapReduceCluster(final int servers) throws IOException {
2899    if (mrCluster != null) {
2900      throw new IllegalStateException("MiniMRCluster is already running");
2901    }
2902    LOG.info("Starting mini mapreduce cluster...");
2903    setupClusterTestDir();
2904    createDirsAndSetProperties();
2905
2906    forceChangeTaskLogDir();
2907
2908    //// hadoop2 specific settings
2909    // Tests were failing because this process used 6GB of virtual memory and was getting killed.
2910    // we up the VM usable so that processes don't get killed.
2911    conf.setFloat("yarn.nodemanager.vmem-pmem-ratio", 8.0f);
2912
2913    // Tests were failing due to MAPREDUCE-4880 / MAPREDUCE-4607 against hadoop 2.0.2-alpha and
2914    // this avoids the problem by disabling speculative task execution in tests.
2915    conf.setBoolean("mapreduce.map.speculative", false);
2916    conf.setBoolean("mapreduce.reduce.speculative", false);
2917    ////
2918
2919    // Allow the user to override FS URI for this map-reduce cluster to use.
2920    mrCluster = new MiniMRCluster(servers,
2921      FS_URI != null ? FS_URI : FileSystem.get(conf).getUri().toString(), 1,
2922      null, null, new JobConf(this.conf));
2923    JobConf jobConf = MapreduceTestingShim.getJobConf(mrCluster);
2924    if (jobConf == null) {
2925      jobConf = mrCluster.createJobConf();
2926    }
2927
2928    jobConf.set("mapreduce.cluster.local.dir",
      conf.get("mapreduce.cluster.local.dir")); // Hadoop MiniMR overwrites this though it should not
2930    LOG.info("Mini mapreduce cluster started");
2931
2932    // In hadoop2, YARN/MR2 starts a mini cluster with its own conf instance and updates settings.
2933    // Our HBase MR jobs need several of these settings in order to properly run.  So we copy the
2934    // necessary config properties here.  YARN-129 required adding a few properties.
2935    conf.set("mapreduce.jobtracker.address", jobConf.get("mapreduce.jobtracker.address"));
2936    // this for mrv2 support; mr1 ignores this
2937    conf.set("mapreduce.framework.name", "yarn");
2938    conf.setBoolean("yarn.is.minicluster", true);
2939    String rmAddress = jobConf.get("yarn.resourcemanager.address");
2940    if (rmAddress != null) {
2941      conf.set("yarn.resourcemanager.address", rmAddress);
2942    }
2943    String historyAddress = jobConf.get("mapreduce.jobhistory.address");
2944    if (historyAddress != null) {
2945      conf.set("mapreduce.jobhistory.address", historyAddress);
2946    }
2947    String schedulerAddress =
2948      jobConf.get("yarn.resourcemanager.scheduler.address");
2949    if (schedulerAddress != null) {
2950      conf.set("yarn.resourcemanager.scheduler.address", schedulerAddress);
2951    }
2952    String mrJobHistoryWebappAddress =
2953      jobConf.get("mapreduce.jobhistory.webapp.address");
2954    if (mrJobHistoryWebappAddress != null) {
2955      conf.set("mapreduce.jobhistory.webapp.address", mrJobHistoryWebappAddress);
2956    }
2957    String yarnRMWebappAddress =
2958      jobConf.get("yarn.resourcemanager.webapp.address");
2959    if (yarnRMWebappAddress != null) {
2960      conf.set("yarn.resourcemanager.webapp.address", yarnRMWebappAddress);
2961    }
2962  }
2963
2964  /**
2965   * Stops the previously started <code>MiniMRCluster</code>.
2966   */
2967  public void shutdownMiniMapReduceCluster() {
2968    if (mrCluster != null) {
2969      LOG.info("Stopping mini mapreduce cluster...");
2970      mrCluster.shutdown();
2971      mrCluster = null;
2972      LOG.info("Mini mapreduce cluster stopped");
2973    }
2974    // Restore configuration to point to local jobtracker
2975    conf.set("mapreduce.jobtracker.address", "local");
2976  }
2977
2978  /**
2979   * Create a stubbed out RegionServerService, mainly for getting FS.
2980   */
2981  public RegionServerServices createMockRegionServerService() throws IOException {
2982    return createMockRegionServerService((ServerName)null);
2983  }
2984
2985  /**
2986   * Create a stubbed out RegionServerService, mainly for getting FS.
2987   * This version is used by TestTokenAuthentication
2988   */
2989  public RegionServerServices createMockRegionServerService(RpcServerInterface rpc) throws
2990      IOException {
2991    final MockRegionServerServices rss = new MockRegionServerServices(getZooKeeperWatcher());
2992    rss.setFileSystem(getTestFileSystem());
2993    rss.setRpcServer(rpc);
2994    return rss;
2995  }
2996
2997  /**
2998   * Create a stubbed out RegionServerService, mainly for getting FS.
2999   * This version is used by TestOpenRegionHandler
3000   */
3001  public RegionServerServices createMockRegionServerService(ServerName name) throws IOException {
3002    final MockRegionServerServices rss = new MockRegionServerServices(getZooKeeperWatcher(), name);
3003    rss.setFileSystem(getTestFileSystem());
3004    return rss;
3005  }
3006
3007  /**
3008   * Switches the logger for the given class to DEBUG level.
3009   * @param clazz The class for which to switch to debug logging.
   * @deprecated In 2.3.0, will be removed in 4.0.0. Only changing the log level on log4j is
   *             supported now, as HBase only uses log4j. Do this yourself instead: if you know
   *             which logging framework you are using, setting its log level to debug is easy.
3013   */
3014  @Deprecated
3015  public void enableDebug(Class<?> clazz) {
3016    Log4jUtils.enableDebug(clazz);
3017  }
3018
3019  /**
   * Expire the Master's session.
   * @throws Exception if the session cannot be expired
3022   */
3023  public void expireMasterSession() throws Exception {
3024    HMaster master = getMiniHBaseCluster().getMaster();
3025    expireSession(master.getZooKeeper(), false);
3026  }
3027
3028  /**
3029   * Expire a region server's session
3030   * @param index which RS
3031   */
3032  public void expireRegionServerSession(int index) throws Exception {
3033    HRegionServer rs = getMiniHBaseCluster().getRegionServer(index);
3034    expireSession(rs.getZooKeeper(), false);
3035    decrementMinRegionServerCount();
3036  }
3037
3038  private void decrementMinRegionServerCount() {
    // decrement the count for this.conf, for newly spawned master
3040    // this.hbaseCluster shares this configuration too
3041    decrementMinRegionServerCount(getConfiguration());
3042
3043    // each master thread keeps a copy of configuration
3044    for (MasterThread master : getHBaseCluster().getMasterThreads()) {
3045      decrementMinRegionServerCount(master.getMaster().getConfiguration());
3046    }
3047  }
3048
3049  private void decrementMinRegionServerCount(Configuration conf) {
3050    int currentCount = conf.getInt(
3051        ServerManager.WAIT_ON_REGIONSERVERS_MINTOSTART, -1);
3052    if (currentCount != -1) {
3053      conf.setInt(ServerManager.WAIT_ON_REGIONSERVERS_MINTOSTART,
3054          Math.max(currentCount - 1, 1));
3055    }
3056  }
3057
3058  public void expireSession(ZKWatcher nodeZK) throws Exception {
    expireSession(nodeZK, false);
3060  }
3061
3062  /**
3063   * Expire a ZooKeeper session as recommended in ZooKeeper documentation
3064   * http://hbase.apache.org/book.html#trouble.zookeeper
3065   * There are issues when doing this:
3066   * [1] http://www.mail-archive.com/dev@zookeeper.apache.org/msg01942.html
3067   * [2] https://issues.apache.org/jira/browse/ZOOKEEPER-1105
3068   *
3069   * @param nodeZK - the ZK watcher to expire
3070   * @param checkStatus - true to check if we can create a Table with the
3071   *                    current configuration.
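   * <p>Example (illustrative; {@code util} is this testing utility): expire the active
   * master's session:
   * <pre>
   * util.expireSession(util.getMiniHBaseCluster().getMaster().getZooKeeper(), false);
   * </pre>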
3072   */
3073  public void expireSession(ZKWatcher nodeZK, boolean checkStatus)
3074    throws Exception {
3075    Configuration c = new Configuration(this.conf);
3076    String quorumServers = ZKConfig.getZKQuorumServersString(c);
3077    ZooKeeper zk = nodeZK.getRecoverableZooKeeper().getZooKeeper();
3078    byte[] password = zk.getSessionPasswd();
3079    long sessionID = zk.getSessionId();
3080
3081    // Expiry seems to be asynchronous (see comment from P. Hunt in [1]),
3082    //  so we create a first watcher to be sure that the
3083    //  event was sent. We expect that if our watcher receives the event
    //  other watchers on the same machine will get it as well.
3085    // When we ask to close the connection, ZK does not close it before
3086    //  we receive all the events, so don't have to capture the event, just
3087    //  closing the connection should be enough.
    ZooKeeper monitor = new ZooKeeper(quorumServers, 1000, new org.apache.zookeeper.Watcher() {
      @Override
      public void process(WatchedEvent watchedEvent) {
        LOG.info("Monitor ZKW received event=" + watchedEvent);
      }
    }, sessionID, password);
3095
3096    // Making it expire
3097    ZooKeeper newZK = new ZooKeeper(quorumServers,
3098        1000, EmptyWatcher.instance, sessionID, password);
3099
    // ensure that we have a connection to the server before closing down, otherwise
    // the close session event will be swallowed before we reach the CONNECTING state
3102    long start = System.currentTimeMillis();
    while (newZK.getState() != States.CONNECTED
        && System.currentTimeMillis() - start < 1000) {
      Thread.sleep(1);
    }
3107    newZK.close();
3108    LOG.info("ZK Closed Session 0x" + Long.toHexString(sessionID));
3109
3110    // Now closing & waiting to be sure that the clients get it.
3111    monitor.close();
3112
3113    if (checkStatus) {
3114      getConnection().getTable(TableName.META_TABLE_NAME).close();
3115    }
3116  }
3117
3118  /**
3119   * Get the Mini HBase cluster.
3120   *
3121   * @return hbase cluster
3122   * @see #getHBaseClusterInterface()
3123   */
3124  public MiniHBaseCluster getHBaseCluster() {
3125    return getMiniHBaseCluster();
3126  }
3127
3128  /**
3129   * Returns the HBaseCluster instance.
3130   * <p>Returned object can be any of the subclasses of HBaseCluster, and the
3131   * tests referring this should not assume that the cluster is a mini cluster or a
3132   * distributed one. If the test only works on a mini cluster, then specific
3133   * method {@link #getMiniHBaseCluster()} can be used instead w/o the
3134   * need to type-cast.
3135   */
3136  public HBaseCluster getHBaseClusterInterface() {
    // implementation note: we should rename this method as #getHBaseCluster(),
    // but this would require refactoring 90+ calls.
3139    return hbaseCluster;
3140  }
3141
3142  /**
3143   * Resets the connections so that the next time getConnection() is called, a new connection is
3144   * created. This is needed in cases where the entire cluster / all the masters are shutdown and
3145   * the connection is not valid anymore.
3146   * TODO: There should be a more coherent way of doing this. Unfortunately the way tests are
3147   *   written, not all start() stop() calls go through this class. Most tests directly operate on
3148   *   the underlying mini/local hbase cluster. That makes it difficult for this wrapper class to
3149   *   maintain the connection state automatically. Cleaning this is a much bigger refactor.
3150   */
3151  public void invalidateConnection() throws IOException {
3152    closeConnection();
3153    // Update the master addresses if they changed.
3154    final String masterConfigBefore = conf.get(HConstants.MASTER_ADDRS_KEY);
3155    final String masterConfAfter = getMiniHBaseCluster().conf.get(HConstants.MASTER_ADDRS_KEY);
3156    LOG.info("Invalidated connection. Updating master addresses before: {} after: {}",
3157        masterConfigBefore, masterConfAfter);
3158    conf.set(HConstants.MASTER_ADDRS_KEY,
3159        getMiniHBaseCluster().conf.get(HConstants.MASTER_ADDRS_KEY));
3160  }
3161
3162  /**
   * Get a shared Connection to the cluster.
   * This method is thread safe.
   * @return A Connection that can be shared. Don't close it. Will be closed on shutdown of cluster.
   * @throws IOException if the connection cannot be created
3167   */
3168  public Connection getConnection() throws IOException {
3169    try {
3170      return this.connection.updateAndGet(connection -> {
3171        if (connection == null) {
3172          try {
3173            connection = ConnectionFactory.createConnection(this.conf);
          } catch (IOException ioe) {
3175            throw new UncheckedIOException("Failed to create connection", ioe);
3176          }
3177        }
3178        return connection;
3179      });
3180    } catch (UncheckedIOException exception) {
3181      throw exception.getCause();
3182    }
3183  }
3184
3185  /**
   * Returns an Admin instance.
   * This instance is shared between HBaseTestingUtility instance users. Closing it has no effect,
   * it will be closed automatically when the cluster shuts down.
3189   *
3190   * @return HBaseAdmin instance which is guaranteed to support only {@link Admin} interface.
3191   *   Functions in HBaseAdmin not provided by {@link Admin} interface can be changed/deleted
3192   *   anytime.
3193   * @deprecated Since 2.0. Will be removed in 3.0. Use {@link #getAdmin()} instead.
3194   */
3195  @Deprecated
3196  public synchronized HBaseAdmin getHBaseAdmin()
3197  throws IOException {
    if (hbaseAdmin == null) {
3199      this.hbaseAdmin = (HBaseAdmin) getConnection().getAdmin();
3200    }
3201    return hbaseAdmin;
3202  }
3203
3204  public void closeConnection() throws IOException {
3205    if (hbaseAdmin != null) {
3206      Closeables.close(hbaseAdmin, true);
3207      hbaseAdmin = null;
3208    }
3209    Connection connection = this.connection.getAndSet(null);
3210    if (connection != null) {
3211      Closeables.close(connection, true);
3212    }
3213  }
3214
3215  /**
   * Returns an Admin instance which is shared between HBaseTestingUtility instance users.
   * Closing it has no effect, it will be closed automatically when the cluster shuts down.
3218   */
3219  public synchronized Admin getAdmin() throws IOException {
    if (hbaseAdmin == null) {
3221      this.hbaseAdmin = (HBaseAdmin) getConnection().getAdmin();
3222    }
3223    return hbaseAdmin;
3224  }
3225
3226  private HBaseAdmin hbaseAdmin = null;
3227
3228  /**
   * Returns an {@link Hbck} instance. Needs to be closed when done.
3230   */
3231  public Hbck getHbck() throws IOException {
3232    return getConnection().getHbck();
3233  }
3234
3235  /**
3236   * Unassign the named region.
3237   *
3238   * @param regionName  The region to unassign.
3239   */
3240  public void unassignRegion(String regionName) throws IOException {
3241    unassignRegion(Bytes.toBytes(regionName));
3242  }
3243
3244  /**
3245   * Unassign the named region.
3246   *
3247   * @param regionName  The region to unassign.
3248   */
3249  public void unassignRegion(byte[] regionName) throws IOException {
3250    getAdmin().unassign(regionName, true);
3251  }
3252
3253  /**
3254   * Closes the region containing the given row.
3255   *
3256   * @param row  The row to find the containing region.
3257   * @param table  The table to find the region.
3258   */
3259  public void unassignRegionByRow(String row, RegionLocator table) throws IOException {
3260    unassignRegionByRow(Bytes.toBytes(row), table);
3261  }
3262
3263  /**
3264   * Closes the region containing the given row.
3265   *
3266   * @param row  The row to find the containing region.
3267   * @param table  The table to find the region.
   * @throws IOException if unassigning the region fails
3269   */
3270  public void unassignRegionByRow(byte[] row, RegionLocator table) throws IOException {
3271    HRegionLocation hrl = table.getRegionLocation(row);
3272    unassignRegion(hrl.getRegionInfo().getRegionName());
3273  }
3274
3275  /**
3276   * Retrieves a splittable region randomly from tableName
3277   * @param tableName name of table
   * @param maxAttempts maximum number of attempts, or -1 for unlimited
3279   * @return the HRegion chosen, null if none was found within limit of maxAttempts
3280   */
3281  public HRegion getSplittableRegion(TableName tableName, int maxAttempts) {
3282    List<HRegion> regions = getHBaseCluster().getRegions(tableName);
3283    int regCount = regions.size();
3284    Set<Integer> attempted = new HashSet<>();
3285    int idx;
3286    int attempts = 0;
3287    do {
3288      regions = getHBaseCluster().getRegions(tableName);
3289      if (regCount != regions.size()) {
3290        // if there was region movement, clear attempted Set
3291        attempted.clear();
3292      }
3293      regCount = regions.size();
      // There is a chance that, before we get the region for the table from an RS, the region is
      // already going for CLOSE. This may be because online schema change is enabled.
3296      if (regCount > 0) {
3297        idx = random.nextInt(regCount);
3298        // if we have just tried this region, there is no need to try again
3299        if (attempted.contains(idx)) {
3300          continue;
3301        }
3302        HRegion region = regions.get(idx);
3303        if (region.checkSplit().isPresent()) {
3304          return region;
3305        }
3306        attempted.add(idx);
3307      }
3308      attempts++;
3309    } while (maxAttempts == -1 || attempts < maxAttempts);
3310    return null;
3311  }
3312
3313  public MiniDFSCluster getDFSCluster() {
3314    return dfsCluster;
3315  }
3316
3317  public void setDFSCluster(MiniDFSCluster cluster) throws IllegalStateException, IOException {
3318    setDFSCluster(cluster, true);
3319  }
3320
3321  /**
3322   * Set the MiniDFSCluster
3323   * @param cluster cluster to use
   * @param requireDown require that the cluster not be "up" (MiniDFSCluster#isClusterUp) before
   * it is set.
3326   * @throws IllegalStateException if the passed cluster is up when it is required to be down
3327   * @throws IOException if the FileSystem could not be set from the passed dfs cluster
3328   */
3329  public void setDFSCluster(MiniDFSCluster cluster, boolean requireDown)
3330      throws IllegalStateException, IOException {
3331    if (dfsCluster != null && requireDown && dfsCluster.isClusterUp()) {
3332      throw new IllegalStateException("DFSCluster is already running! Shut it down first.");
3333    }
3334    this.dfsCluster = cluster;
3335    this.setFs();
3336  }
3337
3338  public FileSystem getTestFileSystem() throws IOException {
3339    return HFileSystem.get(conf);
3340  }
3341
3342  /**
   * Wait until all regions in a table have been assigned. Waits the default timeout (30 seconds)
   * before giving up.
   * @param table Table to wait on.
   * @throws InterruptedException if interrupted while waiting
   * @throws IOException if an IO problem is encountered
3348   */
3349  public void waitTableAvailable(TableName table)
3350      throws InterruptedException, IOException {
3351    waitTableAvailable(table.getName(), 30000);
3352  }
3353
3354  public void waitTableAvailable(TableName table, long timeoutMillis)
3355      throws InterruptedException, IOException {
3356    waitFor(timeoutMillis, predicateTableAvailable(table));
3357  }
3358
3359  /**
3360   * Wait until all regions in a table have been assigned
3361   * @param table Table to wait on.
3362   * @param timeoutMillis Timeout.
   * @throws InterruptedException if interrupted while waiting
   * @throws IOException if an IO problem is encountered
3365   */
3366  public void waitTableAvailable(byte[] table, long timeoutMillis)
3367  throws InterruptedException, IOException {
3368    waitFor(timeoutMillis, predicateTableAvailable(TableName.valueOf(table)));
3369  }
3370
3371  public String explainTableAvailability(TableName tableName) throws IOException {
3372    String msg = explainTableState(tableName, TableState.State.ENABLED) + ", ";
3373    if (getHBaseCluster().getMaster().isAlive()) {
3374      Map<RegionInfo, ServerName> assignments =
3375          getHBaseCluster().getMaster().getAssignmentManager().getRegionStates()
3376              .getRegionAssignments();
3377      final List<Pair<RegionInfo, ServerName>> metaLocations =
3378          MetaTableAccessor.getTableRegionsAndLocations(getConnection(), tableName);
3379      for (Pair<RegionInfo, ServerName> metaLocation : metaLocations) {
3380        RegionInfo hri = metaLocation.getFirst();
3381        ServerName sn = metaLocation.getSecond();
        if (!assignments.containsKey(hri)) {
          msg += ", region " + hri
              + " not assigned, but found in meta, it is expected to be on " + sn;
        } else if (sn == null) {
          msg += ", region " + hri
              + " assigned, but has no server in meta";
        } else if (!sn.equals(assignments.get(hri))) {
          msg += ", region " + hri
              + " assigned, but has different servers in meta and AM (" +
              sn + " <> " + assignments.get(hri) + ")";
        }
3394      }
3395    }
3396    return msg;
3397  }
3398
3399  public String explainTableState(final TableName table, TableState.State state)
3400      throws IOException {
3401    TableState tableState = MetaTableAccessor.getTableState(getConnection(), table);
3402    if (tableState == null) {
3403      return "TableState in META: No table state in META for table " + table
3404          + " last state in meta (including deleted is " + findLastTableState(table) + ")";
3405    } else if (!tableState.inStates(state)) {
3406      return "TableState in META: Not " + state + " state, but " + tableState;
3407    } else {
3408      return "TableState in META: OK";
3409    }
3410  }
3411
3412  @Nullable
3413  public TableState findLastTableState(final TableName table) throws IOException {
3414    final AtomicReference<TableState> lastTableState = new AtomicReference<>(null);
3415    MetaTableAccessor.Visitor visitor = new MetaTableAccessor.Visitor() {
3416      @Override
      public boolean visit(Result r) throws IOException {
        if (!Arrays.equals(r.getRow(), table.getName())) {
          return false;
        }
        TableState state = MetaTableAccessor.getTableState(r);
        if (state != null) {
          lastTableState.set(state);
        }
        return true;
      }
3425    };
3426    MetaTableAccessor
3427        .scanMeta(getConnection(), null, null,
3428            MetaTableAccessor.QueryType.TABLE,
3429            Integer.MAX_VALUE, visitor);
3430    return lastTableState.get();
3431  }
3432
3433  /**
   * Waits for a table to be 'enabled'. Enabled means that the table is set as 'enabled' and
   * all regions have been assigned. Will timeout after the default period (30 seconds).
   * Tolerates a nonexistent table.
3437   * @param table the table to wait on.
3438   * @throws InterruptedException if interrupted while waiting
3439   * @throws IOException if an IO problem is encountered
3440   */
3441  public void waitTableEnabled(TableName table)
3442      throws InterruptedException, IOException {
3443    waitTableEnabled(table, 30000);
3444  }
3445
3446  /**
   * Waits for a table to be 'enabled'. Enabled means that the table is set as 'enabled' and
   * all regions have been assigned.
   * @see #waitTableEnabled(TableName, long)
   * @param table Table to wait on.
   * @param timeoutMillis Time to wait on it being marked enabled.
   * @throws InterruptedException if interrupted while waiting
   * @throws IOException if an IO problem is encountered
3454   */
3455  public void waitTableEnabled(byte[] table, long timeoutMillis)
3456  throws InterruptedException, IOException {
3457    waitTableEnabled(TableName.valueOf(table), timeoutMillis);
3458  }
3459
3460  public void waitTableEnabled(TableName table, long timeoutMillis)
3461  throws IOException {
3462    waitFor(timeoutMillis, predicateTableEnabled(table));
3463  }
3464
3465  /**
   * Waits for a table to be 'disabled'. Disabled means that the table is set as 'disabled'.
   * Will timeout after the default period (30 seconds).
   * @param table Table to wait on.
   * @throws InterruptedException if interrupted while waiting
   * @throws IOException if an IO problem is encountered
3471   */
3472  public void waitTableDisabled(byte[] table)
3473          throws InterruptedException, IOException {
3474    waitTableDisabled(table, 30000);
3475  }
3476
3477  public void waitTableDisabled(TableName table, long millisTimeout)
3478          throws InterruptedException, IOException {
3479    waitFor(millisTimeout, predicateTableDisabled(table));
3480  }
3481
3482  /**
   * Waits for a table to be 'disabled'. Disabled means that the table is set as 'disabled'.
   * @param table Table to wait on.
   * @param timeoutMillis Time to wait on it being marked disabled.
   * @throws InterruptedException if interrupted while waiting
   * @throws IOException if an IO problem is encountered
3488   */
3489  public void waitTableDisabled(byte[] table, long timeoutMillis)
3490          throws InterruptedException, IOException {
3491    waitTableDisabled(TableName.valueOf(table), timeoutMillis);
3492  }
3493
3494  /**
   * Make sure that at least the specified number of region servers
   * are running.
   * @param num minimum number of region servers that should be running
   * @return true if we started some servers
   * @throws IOException if new region servers cannot be started
3500   */
3501  public boolean ensureSomeRegionServersAvailable(final int num)
3502      throws IOException {
3503    boolean startedServer = false;
3504    MiniHBaseCluster hbaseCluster = getMiniHBaseCluster();
    for (int i = hbaseCluster.getLiveRegionServerThreads().size(); i < num; ++i) {
3506      LOG.info("Started new server=" + hbaseCluster.startRegionServer());
3507      startedServer = true;
3508    }
3509
3510    return startedServer;
3511  }
3512
3513
3514  /**
3515   * Make sure that at least the specified number of region servers
3516   * are running. We don't count the ones that are currently stopping or are
3517   * stopped.
3518   * @param num minimum number of region servers that should be running
3519   * @return true if we started some servers
   * @throws IOException if new region servers cannot be started
3521   */
3522  public boolean ensureSomeNonStoppedRegionServersAvailable(final int num)
3523    throws IOException {
3524    boolean startedServer = ensureSomeRegionServersAvailable(num);
3525
3526    int nonStoppedServers = 0;
3527    for (JVMClusterUtil.RegionServerThread rst :
3528      getMiniHBaseCluster().getRegionServerThreads()) {
3529
3530      HRegionServer hrs = rst.getRegionServer();
3531      if (hrs.isStopping() || hrs.isStopped()) {
        LOG.info("A region server is stopped or stopping: " + hrs);
3533      } else {
3534        nonStoppedServers++;
3535      }
3536    }
    for (int i = nonStoppedServers; i < num; ++i) {
3538      LOG.info("Started new server=" + getMiniHBaseCluster().startRegionServer());
3539      startedServer = true;
3540    }
3541    return startedServer;
3542  }
3543
3544
3545  /**
   * This method clones the passed <code>c</code> configuration setting a new
   * user into the clone. Use it when getting new instances of FileSystem. Only
   * works for DistributedFileSystem w/o Kerberos.
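   * <p>Example (illustrative; the suffix is arbitrary):
   * <pre>
   * User user = HBaseTestingUtility.getDifferentUser(conf, ".hrs");
   * // user.runAs(...) can then obtain its own FileSystem instance
   * </pre>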
3549   * @param c Initial configuration
3550   * @param differentiatingSuffix Suffix to differentiate this user from others.
3551   * @return A new configuration instance with a different user set into it.
   * @throws IOException if the current user cannot be obtained
3553   */
3554  public static User getDifferentUser(final Configuration c,
3555    final String differentiatingSuffix)
3556  throws IOException {
3557    FileSystem currentfs = FileSystem.get(c);
3558    if (!(currentfs instanceof DistributedFileSystem) || User.isHBaseSecurityEnabled(c)) {
3559      return User.getCurrent();
3560    }
3561    // Else distributed filesystem.  Make a new instance per daemon.  Below
3562    // code is taken from the AppendTestUtil over in hdfs.
3563    String username = User.getCurrent().getName() +
3564      differentiatingSuffix;
3565    User user = User.createUserForTesting(c, username,
3566        new String[]{"supergroup"});
3567    return user;
3568  }
3569
3570  public static NavigableSet<String> getAllOnlineRegions(MiniHBaseCluster cluster)
3571      throws IOException {
3572    NavigableSet<String> online = new TreeSet<>();
3573    for (RegionServerThread rst : cluster.getLiveRegionServerThreads()) {
3574      try {
3575        for (RegionInfo region :
3576            ProtobufUtil.getOnlineRegions(rst.getRegionServer().getRSRpcServices())) {
3577          online.add(region.getRegionNameAsString());
3578        }
3579      } catch (RegionServerStoppedException e) {
3580        // That's fine.
3581      }
3582    }
3583    for (MasterThread mt : cluster.getLiveMasterThreads()) {
3584      try {
3585        for (RegionInfo region :
3586            ProtobufUtil.getOnlineRegions(mt.getMaster().getRSRpcServices())) {
3587          online.add(region.getRegionNameAsString());
3588        }
3589      } catch (RegionServerStoppedException e) {
3590        // That's fine.
3591      } catch (ServerNotRunningYetException e) {
3592        // That's fine.
3593      }
3594    }
3595    return online;
3596  }
3597
3598  /**
   * Set maxRecoveryErrorCount in DFSClient. In 0.20 pre-append it is hard-coded to 5 and
   * makes tests linger. Here is the exception you'll see:
3601   * <pre>
3602   * 2010-06-15 11:52:28,511 WARN  [DataStreamer for file /hbase/.logs/wal.1276627923013 block
3603   * blk_928005470262850423_1021] hdfs.DFSClient$DFSOutputStream(2657): Error Recovery for block
3604   * blk_928005470262850423_1021 failed  because recovery from primary datanode 127.0.0.1:53683
3605   * failed 4 times.  Pipeline was 127.0.0.1:53687, 127.0.0.1:53683. Will retry...
3606   * </pre>
   * @param stream A DFSClient.DFSOutputStream.
   * @param max the new maximum recovery error count; reflection failures are logged, not thrown
3613   */
3614  public static void setMaxRecoveryErrorCount(final OutputStream stream,
3615      final int max) {
3616    try {
3617      Class<?> [] clazzes = DFSClient.class.getDeclaredClasses();
3618      for (Class<?> clazz: clazzes) {
3619        String className = clazz.getSimpleName();
3620        if (className.equals("DFSOutputStream")) {
3621          if (clazz.isInstance(stream)) {
3622            Field maxRecoveryErrorCountField =
3623              stream.getClass().getDeclaredField("maxRecoveryErrorCount");
3624            maxRecoveryErrorCountField.setAccessible(true);
3625            maxRecoveryErrorCountField.setInt(stream, max);
3626            break;
3627          }
3628        }
3629      }
3630    } catch (Exception e) {
3631      LOG.info("Could not set max recovery field", e);
3632    }
3633  }
3634
3635  /**
   * Uses the assignment manager directly to assign the region, and waits until the specified
   * region has completed assignment.
   * @return true if the region is assigned, false otherwise.
3639   */
3640  public boolean assignRegion(final RegionInfo regionInfo)
3641      throws IOException, InterruptedException {
3642    final AssignmentManager am = getHBaseCluster().getMaster().getAssignmentManager();
3643    am.assign(regionInfo);
3644    return AssignmentTestingUtil.waitForAssignment(am, regionInfo);
3645  }
3646
3647  /**
   * Move region to destination server and wait till region is completely moved and online.
   *
   * @param destRegion region to move
   * @param destServer destination server of the region
   * @throws InterruptedException if interrupted while waiting
   * @throws IOException if the move fails
3654   */
3655  public void moveRegionAndWait(RegionInfo destRegion, ServerName destServer)
3656      throws InterruptedException, IOException {
3657    HMaster master = getMiniHBaseCluster().getMaster();
3658    // TODO: Here we start the move. The move can take a while.
3659    getAdmin().move(destRegion.getEncodedNameAsBytes(), destServer);
3660    while (true) {
3661      ServerName serverName = master.getAssignmentManager().getRegionStates()
3662          .getRegionServerOfRegion(destRegion);
3663      if (serverName != null && serverName.equals(destServer)) {
3664        assertRegionOnServer(destRegion, serverName, 2000);
3665        break;
3666      }
3667      Thread.sleep(10);
3668    }
3669  }
3670
3671  /**
   * Wait until all regions for a table in hbase:meta have a non-empty
   * info:server, up to a configurable timeout value (default is 60 seconds).
   * This means all regions have been deployed, the master has been informed,
   * and hbase:meta has been updated with each region's deployed server.
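   * <p>Typical usage (illustrative; {@code util}, {@code htd} and {@code splitKeys} are assumed
   * to exist):
   * <pre>
   * util.getAdmin().createTable(htd, splitKeys);
   * util.waitUntilAllRegionsAssigned(htd.getTableName());
   * </pre>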
3677   * @param tableName the table name
   * @throws IOException if hbase:meta cannot be read
3679   */
3680  public void waitUntilAllRegionsAssigned(final TableName tableName) throws IOException {
3681    waitUntilAllRegionsAssigned(tableName,
3682      this.conf.getLong("hbase.client.sync.wait.timeout.msec", 60000));
3683  }
3684
3685  /**
   * Wait until all system tables' regions get assigned.
   * @throws IOException if hbase:meta cannot be read
3688   */
3689  public void waitUntilAllSystemRegionsAssigned() throws IOException {
3690    waitUntilAllRegionsAssigned(TableName.META_TABLE_NAME);
3691    waitUntilAllRegionsAssigned(TableName.NAMESPACE_TABLE_NAME);
3692  }
3693
3694  /**
   * Wait until all regions for a table in hbase:meta have a non-empty
   * info:server, or until timeout. This means all regions have been deployed,
   * the master has been informed, and hbase:meta has been updated with each
   * region's deployed server.
   * @param tableName the table name
   * @param timeout timeout, in milliseconds
   * @throws IOException if hbase:meta cannot be read
3702   */
3703  public void waitUntilAllRegionsAssigned(final TableName tableName, final long timeout)
3704      throws IOException {
3705    if (!TableName.isMetaTableName(tableName)) {
3706      try (final Table meta = getConnection().getTable(TableName.META_TABLE_NAME)) {
3707        LOG.debug("Waiting until all regions of table " + tableName + " get assigned. Timeout = " +
3708            timeout + "ms");
3709        waitFor(timeout, 200, true, new ExplainingPredicate<IOException>() {
3710          @Override
3711          public String explainFailure() throws IOException {
3712            return explainTableAvailability(tableName);
3713          }
3714
3715          @Override
3716          public boolean evaluate() throws IOException {
3717            Scan scan = new Scan();
3718            scan.addFamily(HConstants.CATALOG_FAMILY);
3719            boolean tableFound = false;
3720            try (ResultScanner s = meta.getScanner(scan)) {
3721              for (Result r; (r = s.next()) != null;) {
3722                byte[] b = r.getValue(HConstants.CATALOG_FAMILY, HConstants.REGIONINFO_QUALIFIER);
3723                HRegionInfo info = HRegionInfo.parseFromOrNull(b);
3724                if (info != null && info.getTable().equals(tableName)) {
3725                  // Get server hosting this region from catalog family. Return false if no server
3726                  // hosting this region, or if the server hosting this region was recently killed
3727                  // (for fault tolerance testing).
3728                  tableFound = true;
3729                  byte[] server =
3730                      r.getValue(HConstants.CATALOG_FAMILY, HConstants.SERVER_QUALIFIER);
3731                  if (server == null) {
3732                    return false;
3733                  } else {
3734                    byte[] startCode =
3735                        r.getValue(HConstants.CATALOG_FAMILY, HConstants.STARTCODE_QUALIFIER);
3736                    ServerName serverName =
3737                        ServerName.valueOf(Bytes.toString(server).replaceFirst(":", ",") + "," +
3738                            Bytes.toLong(startCode));
3739                    if (!getHBaseClusterInterface().isDistributedCluster() &&
3740                        getHBaseCluster().isKilledRS(serverName)) {
3741                      return false;
3742                    }
3743                  }
3744                  if (RegionStateStore.getRegionState(r, info) != RegionState.State.OPEN) {
3745                    return false;
3746                  }
3747                }
3748              }
3749            }
3750            if (!tableFound) {
3751              LOG.warn("Didn't find the entries for table " + tableName + " in meta, already deleted?");
3752            }
3753            return tableFound;
3754          }
3755        });
3756      }
3757    }
3758    LOG.info("All regions for table " + tableName + " assigned to meta. Checking AM states.");
3759    // check from the master state if we are using a mini cluster
3760    if (!getHBaseClusterInterface().isDistributedCluster()) {
3761      // So, all regions are in the meta table but make sure master knows of the assignments before
3762      // returning -- sometimes this can lag.
3763      HMaster master = getHBaseCluster().getMaster();
3764      final RegionStates states = master.getAssignmentManager().getRegionStates();
3765      waitFor(timeout, 200, new ExplainingPredicate<IOException>() {
3766        @Override
3767        public String explainFailure() throws IOException {
3768          return explainTableAvailability(tableName);
3769        }
3770
3771        @Override
3772        public boolean evaluate() throws IOException {
3773          List<RegionInfo> hris = states.getRegionsOfTable(tableName);
3774          return hris != null && !hris.isEmpty();
3775        }
3776      });
3777    }
3778    LOG.info("All regions for table " + tableName + " assigned.");
3779  }
3780
3781  /**
   * Do a small get/scan against one store. This is required because the store
   * has no actual methods of querying itself, and relies on StoreScanner.
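   * <p>Example (illustrative; {@code store} and {@code row} are assumed to exist):
   * <pre>
   * List&lt;Cell&gt; cells = HBaseTestingUtility.getFromStoreFile(store, new Get(row));
   * </pre>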
3784   */
  public static List<Cell> getFromStoreFile(HStore store, Get get) throws IOException {
3787    Scan scan = new Scan(get);
3788    InternalScanner scanner = (InternalScanner) store.getScanner(scan,
3789        scan.getFamilyMap().get(store.getColumnFamilyDescriptor().getName()),
3790        // originally MultiVersionConcurrencyControl.resetThreadReadPoint() was called to set
3791        // readpoint 0.
3792        0);
3793
3794    List<Cell> result = new ArrayList<>();
3795    scanner.next(result);
3796    if (!result.isEmpty()) {
3797      // verify that we are on the row we want:
3798      Cell kv = result.get(0);
3799      if (!CellUtil.matchingRows(kv, get.getRow())) {
3800        result.clear();
3801      }
3802    }
3803    scanner.close();
3804    return result;
3805  }
3806
3807  /**
   * Create region split keys between startKey and endKey.
   *
   * @param startKey the start of the key range
   * @param endKey the end of the key range
   * @param numRegions the number of regions to be created. It has to be greater than 3.
   * @return resulting split keys
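   * <p>Example (illustrative; {@code util} is this testing utility): asking for 10 regions
   * returns 10 start keys, the first being the empty byte array:
   * <pre>
   * byte[][] keys = util.getRegionSplitStartKeys(Bytes.toBytes("a"), Bytes.toBytes("z"), 10);
   * // keys.length == 10; keys[0] == HConstants.EMPTY_BYTE_ARRAY
   * </pre>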
3814   */
  public byte[][] getRegionSplitStartKeys(byte[] startKey, byte[] endKey, int numRegions) {
    assertTrue(numRegions > 3);
    byte[][] tmpSplitKeys = Bytes.split(startKey, endKey, numRegions - 3);
    byte[][] result = new byte[tmpSplitKeys.length + 1][];
3819    System.arraycopy(tmpSplitKeys, 0, result, 1, tmpSplitKeys.length);
3820    result[0] = HConstants.EMPTY_BYTE_ARRAY;
3821    return result;
3822  }
3823
3824  /**
   * Do a small get/scan against one store. This is required because the store
   * has no actual methods of querying itself, and relies on StoreScanner.
3827   */
  public static List<Cell> getFromStoreFile(HStore store, byte[] row,
      NavigableSet<byte[]> columns) throws IOException {
3832    Get get = new Get(row);
3833    Map<byte[], NavigableSet<byte[]>> s = get.getFamilyMap();
3834    s.put(store.getColumnFamilyDescriptor().getName(), columns);
3835
    return getFromStoreFile(store, get);
3837  }
3838
3839  public static void assertKVListsEqual(String additionalMsg,
3840      final List<? extends Cell> expected,
3841      final List<? extends Cell> actual) {
3842    final int eLen = expected.size();
3843    final int aLen = actual.size();
3844    final int minLen = Math.min(eLen, aLen);
3845
3846    int i;
3847    for (i = 0; i < minLen
3848        && CellComparator.getInstance().compare(expected.get(i), actual.get(i)) == 0;
3849        ++i) {}
3850
3851    if (additionalMsg == null) {
3852      additionalMsg = "";
3853    }
3854    if (!additionalMsg.isEmpty()) {
3855      additionalMsg = ". " + additionalMsg;
3856    }
3857
3858    if (eLen != aLen || i != minLen) {
3859      throw new AssertionError(
3860          "Expected and actual KV arrays differ at position " + i + ": " +
          safeGetAsStr(expected, i) + " (length " + eLen + ") vs. " +
3862          safeGetAsStr(actual, i) + " (length " + aLen + ")" + additionalMsg);
3863    }
3864  }
3865
3866  public static <T> String safeGetAsStr(List<T> lst, int i) {
3867    if (0 <= i && i < lst.size()) {
3868      return lst.get(i).toString();
3869    } else {
3870      return "<out_of_range>";
3871    }
3872  }
3873
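  /**
   * Returns the cluster key for this cluster: the ZooKeeper quorum, client port and parent
   * znode joined with colons, e.g. {@code host1,host2:2181:/hbase} (hosts illustrative).
   */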
3874  public String getClusterKey() {
3875    return conf.get(HConstants.ZOOKEEPER_QUORUM) + ":"
3876        + conf.get(HConstants.ZOOKEEPER_CLIENT_PORT) + ":"
3877        + conf.get(HConstants.ZOOKEEPER_ZNODE_PARENT,
3878            HConstants.DEFAULT_ZOOKEEPER_ZNODE_PARENT);
3879  }
3880
  /**
   * Creates a pre-split table and fills it with randomly generated data: rounds of random puts
   * and deletes followed by a flush, so that each region accumulates the requested number of
   * store files.
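   * <p>Example (illustrative; {@code util} is this testing utility, names and sizes are
   * arbitrary):
   * <pre>
   * // families, maxVersions, numColsPerRow, numFlushes, numRegions, numRowsPerFlush
   * Table t = util.createRandomTable(TableName.valueOf("rand"),
   *     Arrays.asList("cf1", "cf2"), 3, 10, 2, 5, 100);
   * </pre>
   */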
3882  public Table createRandomTable(TableName tableName,
3883      final Collection<String> families,
3884      final int maxVersions,
3885      final int numColsPerRow,
3886      final int numFlushes,
3887      final int numRegions,
3888      final int numRowsPerFlush)
3889      throws IOException, InterruptedException {
3890
3891    LOG.info("\n\nCreating random table " + tableName + " with " + numRegions +
3892        " regions, " + numFlushes + " storefiles per region, " +
3893        numRowsPerFlush + " rows per flush, maxVersions=" +  maxVersions +
3894        "\n");
3895
3896    final Random rand = new Random(tableName.hashCode() * 17L + 12938197137L);
3897    final int numCF = families.size();
3898    final byte[][] cfBytes = new byte[numCF][];
3899    {
3900      int cfIndex = 0;
3901      for (String cf : families) {
3902        cfBytes[cfIndex++] = Bytes.toBytes(cf);
3903      }
3904    }
3905
3906    final int actualStartKey = 0;
3907    final int actualEndKey = Integer.MAX_VALUE;
3908    final int keysPerRegion = (actualEndKey - actualStartKey) / numRegions;
3909    final int splitStartKey = actualStartKey + keysPerRegion;
3910    final int splitEndKey = actualEndKey - keysPerRegion;
3911    final String keyFormat = "%08x";
3912    final Table table = createTable(tableName, cfBytes,
3913        maxVersions,
3914        Bytes.toBytes(String.format(keyFormat, splitStartKey)),
3915        Bytes.toBytes(String.format(keyFormat, splitEndKey)),
3916        numRegions);
3917
3918    if (hbaseCluster != null) {
3919      getMiniHBaseCluster().flushcache(TableName.META_TABLE_NAME);
3920    }
3921
3922    BufferedMutator mutator = getConnection().getBufferedMutator(tableName);
3923
3924    for (int iFlush = 0; iFlush < numFlushes; ++iFlush) {
3925      for (int iRow = 0; iRow < numRowsPerFlush; ++iRow) {
3926        final byte[] row = Bytes.toBytes(String.format(keyFormat,
3927            actualStartKey + rand.nextInt(actualEndKey - actualStartKey)));
3928
3929        Put put = new Put(row);
3930        Delete del = new Delete(row);
3931        for (int iCol = 0; iCol < numColsPerRow; ++iCol) {
3932          final byte[] cf = cfBytes[rand.nextInt(numCF)];
3933          final long ts = rand.nextInt();
3934          final byte[] qual = Bytes.toBytes("col" + iCol);
3935          if (rand.nextBoolean()) {
3936            final byte[] value = Bytes.toBytes("value_for_row_" + iRow +
3937                "_cf_" + Bytes.toStringBinary(cf) + "_col_" + iCol + "_ts_" +
3938                ts + "_random_" + rand.nextLong());
3939            put.addColumn(cf, qual, ts, value);
3940          } else if (rand.nextDouble() < 0.8) {
3941            del.addColumn(cf, qual, ts);
3942          } else {
3943            del.addColumns(cf, qual, ts);
3944          }
3945        }
3946
3947        if (!put.isEmpty()) {
3948          mutator.mutate(put);
3949        }
3950
3951        if (!del.isEmpty()) {
3952          mutator.mutate(del);
3953        }
3954      }
3955      LOG.info("Initiating flush #" + iFlush + " for table " + tableName);
3956      mutator.flush();
3957      if (hbaseCluster != null) {
3958        getMiniHBaseCluster().flushcache(table.getName());
3959      }
3960    }
3961    mutator.close();
3962
3963    return table;
3964  }
3965
3966  public static int randomFreePort() {
3967    return HBaseCommonTestingUtility.randomFreePort();
3968  }

  public static String randomMultiCastAddress() {
3970    return "226.1.1." + random.nextInt(254);
3971  }
3972
3973  public static void waitForHostPort(String host, int port)
3974      throws IOException {
3975    final int maxTimeMs = 10000;
3976    final int maxNumAttempts = maxTimeMs / HConstants.SOCKET_RETRY_WAIT_MS;
3977    IOException savedException = null;
3978    LOG.info("Waiting for server at " + host + ":" + port);
3979    for (int attempt = 0; attempt < maxNumAttempts; ++attempt) {
3980      try {
3981        Socket sock = new Socket(InetAddress.getByName(host), port);
3982        sock.close();
3983        savedException = null;
3984        LOG.info("Server at " + host + ":" + port + " is available");
3985        break;
3986      } catch (UnknownHostException e) {
3987        throw new IOException("Failed to look up " + host, e);
3988      } catch (IOException e) {
3989        savedException = e;
3990      }
3991      Threads.sleepWithoutInterrupt(HConstants.SOCKET_RETRY_WAIT_MS);
3992    }
3993
3994    if (savedException != null) {
3995      throw savedException;
3996    }
3997  }
3998
3999  /**
4000   * Creates a pre-split table for load testing. If the table already exists,
4001   * logs a warning and continues.
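   * <p>Example (illustrative; table and family names are arbitrary):
   * <pre>
   * int regions = HBaseTestingUtility.createPreSplitLoadTestTable(conf,
   *     TableName.valueOf("loadtest"), Bytes.toBytes("cf"),
   *     Algorithm.NONE, DataBlockEncoding.NONE);
   * </pre>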
4002   * @return the number of regions the table was split into
4003   */
4004  public static int createPreSplitLoadTestTable(Configuration conf,
4005      TableName tableName, byte[] columnFamily, Algorithm compression,
4006      DataBlockEncoding dataBlockEncoding) throws IOException {
4007    return createPreSplitLoadTestTable(conf, tableName,
4008      columnFamily, compression, dataBlockEncoding, DEFAULT_REGIONS_PER_SERVER, 1,
4009      Durability.USE_DEFAULT);
4010  }

  /**
4012   * Creates a pre-split table for load testing. If the table already exists,
4013   * logs a warning and continues.
4014   * @return the number of regions the table was split into
4015   */
4016  public static int createPreSplitLoadTestTable(Configuration conf,
4017      TableName tableName, byte[] columnFamily, Algorithm compression,
4018      DataBlockEncoding dataBlockEncoding, int numRegionsPerServer, int regionReplication,
4019      Durability durability)
4020          throws IOException {
4021    HTableDescriptor desc = new HTableDescriptor(tableName);
4022    desc.setDurability(durability);
4023    desc.setRegionReplication(regionReplication);
4024    HColumnDescriptor hcd = new HColumnDescriptor(columnFamily);
4025    hcd.setDataBlockEncoding(dataBlockEncoding);
4026    hcd.setCompressionType(compression);
4027    return createPreSplitLoadTestTable(conf, desc, hcd, numRegionsPerServer);
4028  }
4029
4030  /**
4031   * Creates a pre-split table for load testing. If the table already exists,
4032   * logs a warning and continues.
4033   * @return the number of regions the table was split into
4034   */
4035  public static int createPreSplitLoadTestTable(Configuration conf,
4036      TableName tableName, byte[][] columnFamilies, Algorithm compression,
4037      DataBlockEncoding dataBlockEncoding, int numRegionsPerServer, int regionReplication,
4038      Durability durability)
4039          throws IOException {
4040    HTableDescriptor desc = new HTableDescriptor(tableName);
4041    desc.setDurability(durability);
4042    desc.setRegionReplication(regionReplication);
4043    HColumnDescriptor[] hcds = new HColumnDescriptor[columnFamilies.length];
4044    for (int i = 0; i < columnFamilies.length; i++) {
4045      HColumnDescriptor hcd = new HColumnDescriptor(columnFamilies[i]);
4046      hcd.setDataBlockEncoding(dataBlockEncoding);
4047      hcd.setCompressionType(compression);
4048      hcds[i] = hcd;
4049    }
4050    return createPreSplitLoadTestTable(conf, desc, hcds, numRegionsPerServer);
4051  }
4052
4053  /**
4054   * Creates a pre-split table for load testing. If the table already exists,
4055   * logs a warning and continues.
4056   * @return the number of regions the table was split into
4057   */
4058  public static int createPreSplitLoadTestTable(Configuration conf,
4059      TableDescriptor desc, ColumnFamilyDescriptor hcd) throws IOException {
4060    return createPreSplitLoadTestTable(conf, desc, hcd, DEFAULT_REGIONS_PER_SERVER);
4061  }
4062
4063  /**
4064   * Creates a pre-split table for load testing. If the table already exists,
4065   * logs a warning and continues.
4066   * @return the number of regions the table was split into
4067   */
4068  public static int createPreSplitLoadTestTable(Configuration conf,
      TableDescriptor desc, ColumnFamilyDescriptor hcd, int numRegionsPerServer)
      throws IOException {
4070    return createPreSplitLoadTestTable(conf, desc, new ColumnFamilyDescriptor[] {hcd},
4071        numRegionsPerServer);
4072  }
4073
4074  /**
4075   * Creates a pre-split table for load testing. If the table already exists,
4076   * logs a warning and continues.
4077   * @return the number of regions the table was split into
4078   */
4079  public static int createPreSplitLoadTestTable(Configuration conf,
4080      TableDescriptor desc, ColumnFamilyDescriptor[] hcds,
4081      int numRegionsPerServer) throws IOException {
4082    return createPreSplitLoadTestTable(conf, desc, hcds,
4083      new RegionSplitter.HexStringSplit(), numRegionsPerServer);
4084  }
4085
4086  /**
4087   * Creates a pre-split table for load testing. If the table already exists,
4088   * logs a warning and continues.
4089   * @return the number of regions the table was split into
4090   */
4091  public static int createPreSplitLoadTestTable(Configuration conf,
4092      TableDescriptor td, ColumnFamilyDescriptor[] cds,
4093      SplitAlgorithm splitter, int numRegionsPerServer) throws IOException {
4094    TableDescriptorBuilder builder = TableDescriptorBuilder.newBuilder(td);
4095    for (ColumnFamilyDescriptor cd : cds) {
4096      if (!td.hasColumnFamily(cd.getName())) {
4097        builder.setColumnFamily(cd);
4098      }
4099    }
4100    td = builder.build();
4101    int totalNumberOfRegions = 0;
4102    Connection unmanagedConnection = ConnectionFactory.createConnection(conf);
4103    Admin admin = unmanagedConnection.getAdmin();
4104
4105    try {
      // Create a table with pre-split regions. The number of splits is:
      //   (number of live region servers) * (regions per region server).
4109      int numberOfServers = admin.getRegionServers().size();
4110      if (numberOfServers == 0) {
4111        throw new IllegalStateException("No live regionservers");
4112      }
4113
4114      totalNumberOfRegions = numberOfServers * numRegionsPerServer;
4115      LOG.info("Number of live regionservers: " + numberOfServers + ", " +
4116          "pre-splitting table into " + totalNumberOfRegions + " regions " +
4117          "(regions per server: " + numRegionsPerServer + ")");
4118
      byte[][] splits = splitter.split(totalNumberOfRegions);
4121
4122      admin.createTable(td, splits);
4123    } catch (MasterNotRunningException e) {
4124      LOG.error("Master not running", e);
4125      throw new IOException(e);
4126    } catch (TableExistsException e) {
4127      LOG.warn("Table " + td.getTableName() +
4128          " already exists, continuing");
4129    } finally {
4130      admin.close();
4131      unmanagedConnection.close();
4132    }
4133    return totalNumberOfRegions;
4134  }
4135
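  /**
   * Returns the port of the region server currently hosting the first region of
   * {@code hbase:meta}.
   */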
4136  public static int getMetaRSPort(Connection connection) throws IOException {
4137    try (RegionLocator locator = connection.getRegionLocator(TableName.META_TABLE_NAME)) {
4138      return locator.getRegionLocation(Bytes.toBytes("")).getPort();
4139    }
4140  }
4141
4142  /**
   * Asserts that a region is online on the given server before the timeout elapses. Due to an
   * async race, a region may not yet be in a region server's online region list even after the
   * assignment znode is deleted and the new assignment is recorded in the master, so this
   * method polls until the region appears or the timeout expires.
4147   */
4148  public void assertRegionOnServer(
4149      final RegionInfo hri, final ServerName server,
4150      final long timeout) throws IOException, InterruptedException {
4151    long timeoutTime = System.currentTimeMillis() + timeout;
4152    while (true) {
4153      List<RegionInfo> regions = getAdmin().getRegions(server);
4154      if (regions.stream().anyMatch(r -> RegionInfo.COMPARATOR.compare(r, hri) == 0)) return;
4155      long now = System.currentTimeMillis();
4156      if (now > timeoutTime) break;
4157      Thread.sleep(10);
4158    }
4159    fail("Could not find region " + hri.getRegionNameAsString()
4160      + " on server " + server);
4161  }
4162
4163  /**
4164   * Check to make sure the region is open on the specified
4165   * region server, but not on any other one.
4166   */
4167  public void assertRegionOnlyOnServer(
4168      final RegionInfo hri, final ServerName server,
4169      final long timeout) throws IOException, InterruptedException {
4170    long timeoutTime = System.currentTimeMillis() + timeout;
4171    while (true) {
4172      List<RegionInfo> regions = getAdmin().getRegions(server);
4173      if (regions.stream().anyMatch(r -> RegionInfo.COMPARATOR.compare(r, hri) == 0)) {
4174        List<JVMClusterUtil.RegionServerThread> rsThreads =
4175          getHBaseCluster().getLiveRegionServerThreads();
4176        for (JVMClusterUtil.RegionServerThread rsThread: rsThreads) {
4177          HRegionServer rs = rsThread.getRegionServer();
4178          if (server.equals(rs.getServerName())) {
4179            continue;
4180          }
4181          Collection<HRegion> hrs = rs.getOnlineRegionsLocalContext();
4182          for (HRegion r: hrs) {
4183            assertTrue("Region should not be double assigned",
4184              r.getRegionInfo().getRegionId() != hri.getRegionId());
4185          }
4186        }
4187        return; // good, we are happy
4188      }
4189      long now = System.currentTimeMillis();
4190      if (now > timeoutTime) break;
4191      Thread.sleep(10);
4192    }
4193    fail("Could not find region " + hri.getRegionNameAsString()
4194      + " on server " + server);
4195  }
4196
4197  public HRegion createTestRegion(String tableName, ColumnFamilyDescriptor cd) throws IOException {
4198    TableDescriptor td =
4199        TableDescriptorBuilder.newBuilder(TableName.valueOf(tableName)).setColumnFamily(cd).build();
4200    RegionInfo info = RegionInfoBuilder.newBuilder(TableName.valueOf(tableName)).build();
4201    return createRegionAndWAL(info, getDataTestDir(), getConfiguration(), td);
4202  }
4203
4204  public HRegion createTestRegion(String tableName, ColumnFamilyDescriptor cd,
4205      BlockCache blockCache) throws IOException {
4206    TableDescriptor td =
4207        TableDescriptorBuilder.newBuilder(TableName.valueOf(tableName)).setColumnFamily(cd).build();
4208    RegionInfo info = RegionInfoBuilder.newBuilder(TableName.valueOf(tableName)).build();
4209    return createRegionAndWAL(info, getDataTestDir(), getConfiguration(), td, blockCache);
4210  }
4211
4212  public void setFileSystemURI(String fsURI) {
4213    FS_URI = fsURI;
4214  }
4215
4216  /**
   * Returns a {@link Predicate} for checking that there are no regions in transition in the
   * master.
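   * <p>Typically handed to {@code waitFor}, as {@link #waitUntilNoRegionsInTransition(long)}
   * does; {@code util} below is an assumed instance of this class:
   * <pre>{@code
   * util.waitFor(timeout, util.predicateNoRegionsInTransition());
   * }</pre>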
4218   */
4219  public ExplainingPredicate<IOException> predicateNoRegionsInTransition() {
4220    return new ExplainingPredicate<IOException>() {
4221      @Override
4222      public String explainFailure() throws IOException {
4223        final RegionStates regionStates = getMiniHBaseCluster().getMaster()
4224            .getAssignmentManager().getRegionStates();
4225        return "found in transition: " + regionStates.getRegionsInTransition().toString();
4226      }
4227
4228      @Override
4229      public boolean evaluate() throws IOException {
4230        HMaster master = getMiniHBaseCluster().getMaster();
4231        if (master == null) return false;
4232        AssignmentManager am = master.getAssignmentManager();
4233        if (am == null) return false;
4234        return !am.hasRegionsInTransition();
4235      }
4236    };
4237  }
4238
4239  /**
   * Returns a {@link Predicate} for checking that the given table is enabled.
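   * <p>For example, to block a test until {@code tableName} is enabled, with {@code util} an
   * assumed instance of this class:
   * <pre>{@code
   * util.waitFor(30000, util.predicateTableEnabled(tableName));
   * }</pre>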
4241   */
4242  public Waiter.Predicate<IOException> predicateTableEnabled(final TableName tableName) {
4243    return new ExplainingPredicate<IOException>() {
4244      @Override
4245      public String explainFailure() throws IOException {
4246        return explainTableState(tableName, TableState.State.ENABLED);
4247      }
4248
4249      @Override
4250      public boolean evaluate() throws IOException {
4251        return getAdmin().tableExists(tableName) && getAdmin().isTableEnabled(tableName);
4252      }
4253    };
4254  }
4255
4256  /**
   * Returns a {@link Predicate} for checking that the given table is disabled.
4258   */
4259  public Waiter.Predicate<IOException> predicateTableDisabled(final TableName tableName) {
4260    return new ExplainingPredicate<IOException>() {
4261      @Override
4262      public String explainFailure() throws IOException {
4263        return explainTableState(tableName, TableState.State.DISABLED);
4264      }
4265
4266      @Override
4267      public boolean evaluate() throws IOException {
4268        return getAdmin().isTableDisabled(tableName);
4269      }
4270    };
4271  }
4272
4273  /**
   * Returns a {@link Predicate} for checking that the given table is available.
4275   */
4276  public Waiter.Predicate<IOException> predicateTableAvailable(final TableName tableName) {
4277    return new ExplainingPredicate<IOException>() {
4278      @Override
4279      public String explainFailure() throws IOException {
4280        return explainTableAvailability(tableName);
4281      }
4282
4283      @Override
4284      public boolean evaluate() throws IOException {
4285        boolean tableAvailable = getAdmin().isTableAvailable(tableName);
4286        if (tableAvailable) {
4287          try (Table table = getConnection().getTable(tableName)) {
4288            TableDescriptor htd = table.getDescriptor();
4289            for (HRegionLocation loc : getConnection().getRegionLocator(tableName)
4290                .getAllRegionLocations()) {
4291              Scan scan = new Scan().withStartRow(loc.getRegionInfo().getStartKey())
4292                  .withStopRow(loc.getRegionInfo().getEndKey()).setOneRowLimit()
4293                  .setMaxResultsPerColumnFamily(1).setCacheBlocks(false);
4294              for (byte[] family : htd.getColumnFamilyNames()) {
4295                scan.addFamily(family);
4296              }
4297              try (ResultScanner scanner = table.getScanner(scan)) {
4298                scanner.next();
4299              }
4300            }
4301          }
4302        }
4303        return tableAvailable;
4304      }
4305    };
4306  }
4307
4308  /**
   * Wait until there are no regions in transition.
   * @param timeout how long to wait, in milliseconds
   * @throws IOException if checking the region states fails
4312   */
4313  public void waitUntilNoRegionsInTransition(final long timeout) throws IOException {
4314    waitFor(timeout, predicateNoRegionsInTransition());
4315  }
4316
4317  /**
   * Wait until there are no regions in transition, with a time limit of 15 minutes.
   * @throws IOException if checking the region states fails
4320   */
4321  public void waitUntilNoRegionsInTransition() throws IOException {
4322    waitUntilNoRegionsInTransition(15 * 60000);
4323  }
4324
4325  /**
   * Wait until the given labels are available in the VisibilityLabelsCache.
   * @param timeoutMillis how long to wait, in milliseconds
   * @param labels the visibility labels to wait for
4329   */
4330  public void waitLabelAvailable(long timeoutMillis, final String... labels) {
4331    final VisibilityLabelsCache labelsCache = VisibilityLabelsCache.get();
4332    waitFor(timeoutMillis, new Waiter.ExplainingPredicate<RuntimeException>() {
4333
4334      @Override
4335      public boolean evaluate() {
4336        for (String label : labels) {
4337          if (labelsCache.getLabelOrdinal(label) == 0) {
4338            return false;
4339          }
4340        }
4341        return true;
4342      }
4343
4344      @Override
4345      public String explainFailure() {
4346        for (String label : labels) {
4347          if (labelsCache.getLabelOrdinal(label) == 0) {
4348            return label + " is not available yet";
4349          }
4350        }
4351        return "";
4352      }
4353    });
4354  }
4355
4356  /**
   * Create a set of column descriptors covering every combination of the available compression,
   * data block encoding, and bloom filter types.
4359   * @return the list of column descriptors
4360   */
4361  public static List<HColumnDescriptor> generateColumnDescriptors() {
4362    return generateColumnDescriptors("");
4363  }
4364
4365  /**
   * Create a set of column descriptors covering every combination of the available compression,
   * data block encoding, and bloom filter types.
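   * <p>For example, with prefix {@code "test"} the generated family names look like
   * {@code test-cf-!@#&-0!@#}, {@code test-cf-!@#&-1!@#}, and so on, one per combination.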
   * @param prefix prefix for the generated family names
4369   * @return the list of column descriptors
4370   */
4371  public static List<HColumnDescriptor> generateColumnDescriptors(final String prefix) {
4372    List<HColumnDescriptor> htds = new ArrayList<>();
4373    long familyId = 0;
4374    for (Compression.Algorithm compressionType: getSupportedCompressionAlgorithms()) {
4375      for (DataBlockEncoding encodingType: DataBlockEncoding.values()) {
4376        for (BloomType bloomType: BloomType.values()) {
4377          String name = String.format("%s-cf-!@#&-%d!@#", prefix, familyId);
4378          HColumnDescriptor htd = new HColumnDescriptor(name);
4379          htd.setCompressionType(compressionType);
4380          htd.setDataBlockEncoding(encodingType);
4381          htd.setBloomFilterType(bloomType);
4382          htds.add(htd);
4383          familyId++;
4384        }
4385      }
4386    }
4387    return htds;
4388  }
4389
4390  /**
   * Get the compression algorithms whose codecs can actually be loaded in this JVM.
   * @return the supported compression algorithms
4393   */
4394  public static Compression.Algorithm[] getSupportedCompressionAlgorithms() {
4395    String[] allAlgos = HFile.getSupportedCompressionAlgorithms();
4396    List<Compression.Algorithm> supportedAlgos = new ArrayList<>();
4397    for (String algoName : allAlgos) {
4398      try {
4399        Compression.Algorithm algo = Compression.getCompressionAlgorithmByName(algoName);
4400        algo.getCompressor();
4401        supportedAlgos.add(algo);
4402      } catch (Throwable t) {
4403        // this algo is not available
4404      }
4405    }
4406    return supportedAlgos.toArray(new Algorithm[supportedAlgos.size()]);
4407  }
4408
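  /**
   * Returns the closest row at or before {@code row} in {@code family}, located with a reversed,
   * single-row scan; returns null when no such row exists, or, for the meta region, when the row
   * found belongs to a different table than {@code row}.
   */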
4409  public Result getClosestRowBefore(Region r, byte[] row, byte[] family) throws IOException {
4410    Scan scan = new Scan(row);
4411    scan.setSmall(true);
4412    scan.setCaching(1);
4413    scan.setReversed(true);
4414    scan.addFamily(family);
    try (RegionScanner scanner = r.getScanner(scan)) {
      List<Cell> cells = new ArrayList<>(1);
      scanner.next(cells);
      if (cells.isEmpty()) {
        // Guard against an empty scan result before dereferencing cells.get(0).
        return null;
      }
      if (r.getRegionInfo().isMetaRegion() && !isTargetTable(row, cells.get(0))) {
        return null;
      }
      return Result.create(cells);
4422    }
4423  }
4424
4425  private boolean isTargetTable(final byte[] inRow, Cell c) {
4426    String inputRowString = Bytes.toString(inRow);
4427    int i = inputRowString.indexOf(HConstants.DELIMITER);
4428    String outputRowString = Bytes.toString(c.getRowArray(), c.getRowOffset(), c.getRowLength());
4429    int o = outputRowString.indexOf(HConstants.DELIMITER);
4430    return inputRowString.substring(0, i).equals(outputRowString.substring(0, o));
4431  }
4432
4433  /**
4434   * Sets up {@link MiniKdc} for testing security.
4435   * Uses {@link HBaseKerberosUtils} to set the given keytab file as
4436   * {@link HBaseKerberosUtils#KRB_KEYTAB_FILE}.
   * Note that there is also the easier-to-use kerby KDC server, and a utility for using it,
   * {@link org.apache.hadoop.hbase.util.SimpleKdcServerUtil}. The kerby KDC server is preferred
   * since it carries less baggage; it came in with HBASE-5291.
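   * <p>A minimal sketch of typical use; the keytab path and principal are illustrative, and
   * {@code util} is an assumed instance of this class:
   * <pre>{@code
   * File keytab = new File(util.getDataTestDir("keytab").toUri().getPath());
   * MiniKdc kdc = util.setupMiniKdc(keytab);
   * try {
   *   kdc.createPrincipal(keytab, "hbase/localhost");
   * } finally {
   *   kdc.stop();
   * }
   * }</pre>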
4440   */
4441  public MiniKdc setupMiniKdc(File keytabFile) throws Exception {
4442    Properties conf = MiniKdc.createConf();
4443    conf.put(MiniKdc.DEBUG, true);
4444    MiniKdc kdc = null;
4445    File dir = null;
    // There is a time lag between selecting a port and trying to bind to it. It is possible that
    // another service grabs the port in between, which will result in a BindException.
4448    boolean bindException;
4449    int numTries = 0;
4450    do {
4451      try {
4452        bindException = false;
4453        dir = new File(getDataTestDir("kdc").toUri().getPath());
4454        kdc = new MiniKdc(conf, dir);
4455        kdc.start();
4456      } catch (BindException e) {
4457        FileUtils.deleteDirectory(dir);  // clean directory
4458        numTries++;
4459        if (numTries == 3) {
4460          LOG.error("Failed setting up MiniKDC. Tried " + numTries + " times.");
4461          throw e;
4462        }
4463        LOG.error("BindException encountered when setting up MiniKdc. Trying again.");
4464        bindException = true;
4465      }
4466    } while (bindException);
4467    HBaseKerberosUtils.setKeytabFileForTesting(keytabFile.getAbsolutePath());
4468    return kdc;
4469  }
4470
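  /**
   * Returns the total number of store files for {@code family} of {@code tableName}, summed
   * across all region servers in the mini cluster.
   */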
4471  public int getNumHFiles(final TableName tableName, final byte[] family) {
4472    int numHFiles = 0;
4473    for (RegionServerThread regionServerThread : getMiniHBaseCluster().getRegionServerThreads()) {
      numHFiles += getNumHFilesForRS(regionServerThread.getRegionServer(), tableName, family);
4476    }
4477    return numHFiles;
4478  }
4479
4480  public int getNumHFilesForRS(final HRegionServer rs, final TableName tableName,
4481                               final byte[] family) {
4482    int numHFiles = 0;
4483    for (Region region : rs.getRegions(tableName)) {
4484      numHFiles += region.getStore(family).getStorefilesCount();
4485    }
4486    return numHFiles;
4487  }
4488
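  /**
   * Asserts that the two table descriptors carry the same values and equivalent column families,
   * without comparing their table names.
   */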
4489  public void verifyTableDescriptorIgnoreTableName(TableDescriptor ltd, TableDescriptor rtd) {
4490    assertEquals(ltd.getValues().hashCode(), rtd.getValues().hashCode());
4491    Collection<ColumnFamilyDescriptor> ltdFamilies = Arrays.asList(ltd.getColumnFamilies());
4492    Collection<ColumnFamilyDescriptor> rtdFamilies = Arrays.asList(rtd.getColumnFamilies());
4493    assertEquals(ltdFamilies.size(), rtdFamilies.size());
4494    for (Iterator<ColumnFamilyDescriptor> it = ltdFamilies.iterator(), it2 =
4495         rtdFamilies.iterator(); it.hasNext();) {
4496      assertEquals(0,
4497          ColumnFamilyDescriptor.COMPARATOR.compare(it.next(), it2.next()));
4498    }
4499  }
4500
4501  /**
4502   * Await the successful return of {@code condition}, sleeping {@code sleepMillis} between
4503   * invocations.
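   * <p>A hedged sketch of typical use; {@code latch} is an assumed test-local
   * {@code CountDownLatch}:
   * <pre>{@code
   * HBaseTestingUtility.await(100, () -> latch.getCount() == 0);
   * }</pre>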
4504   */
4505  public static void await(final long sleepMillis, final BooleanSupplier condition)
4506    throws InterruptedException {
4507    try {
4508      while (!condition.getAsBoolean()) {
4509        Thread.sleep(sleepMillis);
4510      }
4511    } catch (RuntimeException e) {
4512      if (e.getCause() instanceof AssertionError) {
4513        throw (AssertionError) e.getCause();
4514      }
4515      throw e;
4516    }
4517  }
4518}