001/*
002 * Licensed to the Apache Software Foundation (ASF) under one
003 * or more contributor license agreements.  See the NOTICE file
004 * distributed with this work for additional information
005 * regarding copyright ownership.  The ASF licenses this file
006 * to you under the Apache License, Version 2.0 (the
007 * "License"); you may not use this file except in compliance
008 * with the License.  You may obtain a copy of the License at
009 *
010 *     http://www.apache.org/licenses/LICENSE-2.0
011 *
012 * Unless required by applicable law or agreed to in writing, software
013 * distributed under the License is distributed on an "AS IS" BASIS,
014 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
015 * See the License for the specific language governing permissions and
016 * limitations under the License.
017 */
018package org.apache.hadoop.hbase;
019
020import java.io.File;
021import java.io.IOException;
022import java.io.OutputStream;
023import java.io.UncheckedIOException;
024import java.lang.reflect.Field;
025import java.lang.reflect.Modifier;
026import java.net.BindException;
027import java.net.DatagramSocket;
028import java.net.InetAddress;
029import java.net.ServerSocket;
030import java.net.Socket;
031import java.net.UnknownHostException;
032import java.nio.charset.StandardCharsets;
033import java.security.MessageDigest;
034import java.util.ArrayList;
035import java.util.Arrays;
036import java.util.Collection;
037import java.util.Collections;
038import java.util.HashSet;
039import java.util.Iterator;
040import java.util.List;
041import java.util.Map;
042import java.util.NavigableSet;
043import java.util.Properties;
044import java.util.Random;
045import java.util.Set;
046import java.util.TreeSet;
047import java.util.concurrent.ThreadLocalRandom;
048import java.util.concurrent.TimeUnit;
049import java.util.concurrent.atomic.AtomicReference;
050import java.util.function.BooleanSupplier;
051import org.apache.commons.io.FileUtils;
052import org.apache.commons.lang3.RandomStringUtils;
053import org.apache.hadoop.conf.Configuration;
054import org.apache.hadoop.fs.FileSystem;
055import org.apache.hadoop.fs.Path;
056import org.apache.hadoop.hbase.Waiter.ExplainingPredicate;
057import org.apache.hadoop.hbase.Waiter.Predicate;
058import org.apache.hadoop.hbase.client.Admin;
059import org.apache.hadoop.hbase.client.AsyncClusterConnection;
060import org.apache.hadoop.hbase.client.BufferedMutator;
061import org.apache.hadoop.hbase.client.ClusterConnectionFactory;
062import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
063import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
064import org.apache.hadoop.hbase.client.Connection;
065import org.apache.hadoop.hbase.client.ConnectionFactory;
066import org.apache.hadoop.hbase.client.Consistency;
067import org.apache.hadoop.hbase.client.Delete;
068import org.apache.hadoop.hbase.client.Durability;
069import org.apache.hadoop.hbase.client.Get;
070import org.apache.hadoop.hbase.client.Hbck;
071import org.apache.hadoop.hbase.client.MasterRegistry;
072import org.apache.hadoop.hbase.client.Put;
073import org.apache.hadoop.hbase.client.RegionInfo;
074import org.apache.hadoop.hbase.client.RegionInfoBuilder;
075import org.apache.hadoop.hbase.client.RegionLocator;
076import org.apache.hadoop.hbase.client.Result;
077import org.apache.hadoop.hbase.client.ResultScanner;
078import org.apache.hadoop.hbase.client.Scan;
079import org.apache.hadoop.hbase.client.Scan.ReadType;
080import org.apache.hadoop.hbase.client.Table;
081import org.apache.hadoop.hbase.client.TableDescriptor;
082import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
083import org.apache.hadoop.hbase.client.TableState;
084import org.apache.hadoop.hbase.fs.HFileSystem;
085import org.apache.hadoop.hbase.io.compress.Compression;
086import org.apache.hadoop.hbase.io.compress.Compression.Algorithm;
087import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
088import org.apache.hadoop.hbase.io.hfile.BlockCache;
089import org.apache.hadoop.hbase.io.hfile.ChecksumUtil;
090import org.apache.hadoop.hbase.io.hfile.HFile;
091import org.apache.hadoop.hbase.ipc.RpcServerInterface;
092import org.apache.hadoop.hbase.logging.Log4jUtils;
093import org.apache.hadoop.hbase.mapreduce.MapreduceTestingShim;
094import org.apache.hadoop.hbase.master.HMaster;
095import org.apache.hadoop.hbase.master.RegionState;
096import org.apache.hadoop.hbase.master.ServerManager;
097import org.apache.hadoop.hbase.master.assignment.AssignmentManager;
098import org.apache.hadoop.hbase.master.assignment.AssignmentTestingUtil;
099import org.apache.hadoop.hbase.master.assignment.RegionStateStore;
100import org.apache.hadoop.hbase.master.assignment.RegionStates;
101import org.apache.hadoop.hbase.mob.MobFileCache;
102import org.apache.hadoop.hbase.regionserver.BloomType;
103import org.apache.hadoop.hbase.regionserver.ChunkCreator;
104import org.apache.hadoop.hbase.regionserver.HRegion;
105import org.apache.hadoop.hbase.regionserver.HRegionServer;
106import org.apache.hadoop.hbase.regionserver.HStore;
107import org.apache.hadoop.hbase.regionserver.InternalScanner;
108import org.apache.hadoop.hbase.regionserver.MemStoreLAB;
109import org.apache.hadoop.hbase.regionserver.Region;
110import org.apache.hadoop.hbase.regionserver.RegionScanner;
111import org.apache.hadoop.hbase.regionserver.RegionServerServices;
112import org.apache.hadoop.hbase.regionserver.RegionServerStoppedException;
113import org.apache.hadoop.hbase.security.HBaseKerberosUtils;
114import org.apache.hadoop.hbase.security.User;
115import org.apache.hadoop.hbase.security.UserProvider;
116import org.apache.hadoop.hbase.security.visibility.VisibilityLabelsCache;
117import org.apache.hadoop.hbase.util.Bytes;
118import org.apache.hadoop.hbase.util.CommonFSUtils;
119import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
120import org.apache.hadoop.hbase.util.FSUtils;
121import org.apache.hadoop.hbase.util.JVMClusterUtil;
122import org.apache.hadoop.hbase.util.JVMClusterUtil.MasterThread;
123import org.apache.hadoop.hbase.util.JVMClusterUtil.RegionServerThread;
124import org.apache.hadoop.hbase.util.Pair;
125import org.apache.hadoop.hbase.util.ReflectionUtils;
126import org.apache.hadoop.hbase.util.RegionSplitter;
127import org.apache.hadoop.hbase.util.RegionSplitter.SplitAlgorithm;
128import org.apache.hadoop.hbase.util.RetryCounter;
129import org.apache.hadoop.hbase.util.Threads;
130import org.apache.hadoop.hbase.wal.WAL;
131import org.apache.hadoop.hbase.wal.WALFactory;
132import org.apache.hadoop.hbase.zookeeper.EmptyWatcher;
133import org.apache.hadoop.hbase.zookeeper.ZKConfig;
134import org.apache.hadoop.hbase.zookeeper.ZKWatcher;
135import org.apache.hadoop.hdfs.DFSClient;
136import org.apache.hadoop.hdfs.DistributedFileSystem;
137import org.apache.hadoop.hdfs.MiniDFSCluster;
138import org.apache.hadoop.hdfs.server.namenode.EditLogFileOutputStream;
139import org.apache.hadoop.mapred.JobConf;
140import org.apache.hadoop.mapred.MiniMRCluster;
141import org.apache.hadoop.mapred.TaskLog;
142import org.apache.hadoop.minikdc.MiniKdc;
143import org.apache.yetus.audience.InterfaceAudience;
144import org.apache.zookeeper.WatchedEvent;
145import org.apache.zookeeper.ZooKeeper;
146import org.apache.zookeeper.ZooKeeper.States;
147
148import org.apache.hbase.thirdparty.com.google.common.io.Closeables;
149
150import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
151
152/**
153 * Facility for testing HBase. Replacement for old HBaseTestCase and HBaseClusterTestCase
154 * functionality. Create an instance and keep it around testing HBase. This class is meant to be
155 * your one-stop shop for anything you might need testing. Manages one cluster at a time only.
156 * Managed cluster can be an in-process {@link MiniHBaseCluster}, or a deployed cluster of type
157 * {@code DistributedHBaseCluster}. Not all methods work with the real cluster. Depends on log4j
158 * being on classpath and hbase-site.xml for logging and test-run configuration. It does not set
159 * logging levels. In the configuration properties, default values for master-info-port and
160 * region-server-port are overridden such that a random port will be assigned (thus avoiding port
161 * contention if another local HBase instance is already running).
162 * <p>
163 * To preserve test data directories, pass the system property "hbase.testing.preserve.testdir"
164 * setting it to true.
165 * @deprecated since 3.0.0, will be removed in 4.0.0. Use
166 *             {@link org.apache.hadoop.hbase.testing.TestingHBaseCluster} instead.
167 */
168@InterfaceAudience.Public
169@Deprecated
170public class HBaseTestingUtility extends HBaseZKTestingUtility {
171
172  /**
173   * System property key to get test directory value. Name is as it is because mini dfs has
174   * hard-codings to put test data here. It should NOT be used directly in HBase, as it's a property
175   * used in mini dfs.
176   * @deprecated since 2.0.0 and will be removed in 3.0.0. Can be used only with mini dfs.
177   * @see <a href="https://issues.apache.org/jira/browse/HBASE-19410">HBASE-19410</a>
178   */
179  @Deprecated
180  private static final String TEST_DIRECTORY_KEY = "test.build.data";
181
182  public static final String REGIONS_PER_SERVER_KEY = "hbase.test.regions-per-server";
183  /**
184   * The default number of regions per regionserver when creating a pre-split table.
185   */
186  public static final int DEFAULT_REGIONS_PER_SERVER = 3;
187
188  public static final String PRESPLIT_TEST_TABLE_KEY = "hbase.test.pre-split-table";
189  public static final boolean PRESPLIT_TEST_TABLE = true;
190
191  private MiniDFSCluster dfsCluster = null;
192
193  private volatile HBaseCluster hbaseCluster = null;
194  private MiniMRCluster mrCluster = null;
195
196  /** If there is a mini cluster running for this testing utility instance. */
197  private volatile boolean miniClusterRunning;
198
199  private String hadoopLogDir;
200
201  /**
202   * Directory on test filesystem where we put the data for this instance of HBaseTestingUtility
203   */
204  private Path dataTestDirOnTestFS = null;
205
206  private final AtomicReference<AsyncClusterConnection> asyncConnection = new AtomicReference<>();
207
208  /** Filesystem URI used for map-reduce mini-cluster setup */
209  private static String FS_URI;
210
211  /** This is for unit tests parameterized with a single boolean. */
212  public static final List<Object[]> MEMSTORETS_TAGS_PARAMETRIZED = memStoreTSAndTagsCombination();
213
214  /**
215   * Checks to see if a specific port is available.
216   * @param port the port number to check for availability
217   * @return <tt>true</tt> if the port is available, or <tt>false</tt> if not
218   */
219  public static boolean available(int port) {
220    ServerSocket ss = null;
221    DatagramSocket ds = null;
222    try {
223      ss = new ServerSocket(port);
224      ss.setReuseAddress(true);
225      ds = new DatagramSocket(port);
226      ds.setReuseAddress(true);
227      return true;
228    } catch (IOException e) {
229      // Do nothing
230    } finally {
231      if (ds != null) {
232        ds.close();
233      }
234
235      if (ss != null) {
236        try {
237          ss.close();
238        } catch (IOException e) {
239          /* should not be thrown */
240        }
241      }
242    }
243
244    return false;
245  }
246
247  /**
248   * Create all combinations of Bloom filters and compression algorithms for testing.
249   */
250  private static List<Object[]> bloomAndCompressionCombinations() {
251    List<Object[]> configurations = new ArrayList<>();
252    for (Compression.Algorithm comprAlgo : HBaseCommonTestingUtility.COMPRESSION_ALGORITHMS) {
253      for (BloomType bloomType : BloomType.values()) {
254        configurations.add(new Object[] { comprAlgo, bloomType });
255      }
256    }
257    return Collections.unmodifiableList(configurations);
258  }
259
260  /**
261   * Create combination of memstoreTS and tags
262   */
263  private static List<Object[]> memStoreTSAndTagsCombination() {
264    List<Object[]> configurations = new ArrayList<>();
265    configurations.add(new Object[] { false, false });
266    configurations.add(new Object[] { false, true });
267    configurations.add(new Object[] { true, false });
268    configurations.add(new Object[] { true, true });
269    return Collections.unmodifiableList(configurations);
270  }
271
272  public static List<Object[]> memStoreTSTagsAndOffheapCombination() {
273    List<Object[]> configurations = new ArrayList<>();
274    configurations.add(new Object[] { false, false, true });
275    configurations.add(new Object[] { false, false, false });
276    configurations.add(new Object[] { false, true, true });
277    configurations.add(new Object[] { false, true, false });
278    configurations.add(new Object[] { true, false, true });
279    configurations.add(new Object[] { true, false, false });
280    configurations.add(new Object[] { true, true, true });
281    configurations.add(new Object[] { true, true, false });
282    return Collections.unmodifiableList(configurations);
283  }
284
285  public static final Collection<Object[]> BLOOM_AND_COMPRESSION_COMBINATIONS =
286    bloomAndCompressionCombinations();
287
288  /**
289   * <p>
290   * Create an HBaseTestingUtility using a default configuration.
291   * <p>
292   * Initially, all tmp files are written to a local test data directory. Once
293   * {@link #startMiniDFSCluster} is called, either directly or via {@link #startMiniCluster()}, tmp
294   * data will be written to the DFS directory instead.
295   */
296  public HBaseTestingUtility() {
297    this(HBaseConfiguration.create());
298  }
299
300  /**
301   * <p>
302   * Create an HBaseTestingUtility using a given configuration.
303   * <p>
304   * Initially, all tmp files are written to a local test data directory. Once
305   * {@link #startMiniDFSCluster} is called, either directly or via {@link #startMiniCluster()}, tmp
306   * data will be written to the DFS directory instead.
307   * @param conf The configuration to use for further operations
308   */
309  public HBaseTestingUtility(Configuration conf) {
310    super(conf);
311
312    // a hbase checksum verification failure will cause unit tests to fail
313    ChecksumUtil.generateExceptionForChecksumFailureForTest(true);
314
315    // Save this for when setting default file:// breaks things
316    if (this.conf.get("fs.defaultFS") != null) {
317      this.conf.set("original.defaultFS", this.conf.get("fs.defaultFS"));
318    }
319    if (this.conf.get(HConstants.HBASE_DIR) != null) {
320      this.conf.set("original.hbase.dir", this.conf.get(HConstants.HBASE_DIR));
321    }
322    // Every cluster is a local cluster until we start DFS
323    // Note that conf could be null, but this.conf will not be
324    String dataTestDir = getDataTestDir().toString();
325    this.conf.set("fs.defaultFS", "file:///");
326    this.conf.set(HConstants.HBASE_DIR, "file://" + dataTestDir);
327    LOG.debug("Setting {} to {}", HConstants.HBASE_DIR, dataTestDir);
328    this.conf.setBoolean(CommonFSUtils.UNSAFE_STREAM_CAPABILITY_ENFORCE, false);
329    // If the value for random ports isn't set set it to true, thus making
330    // tests opt-out for random port assignment
331    this.conf.setBoolean(LocalHBaseCluster.ASSIGN_RANDOM_PORTS,
332      this.conf.getBoolean(LocalHBaseCluster.ASSIGN_RANDOM_PORTS, true));
333  }
334
335  /**
336   * Close both the region {@code r} and it's underlying WAL. For use in tests.
337   */
338  public static void closeRegionAndWAL(final Region r) throws IOException {
339    closeRegionAndWAL((HRegion) r);
340  }
341
342  /**
343   * Close both the HRegion {@code r} and it's underlying WAL. For use in tests.
344   */
345  public static void closeRegionAndWAL(final HRegion r) throws IOException {
346    if (r == null) return;
347    r.close();
348    if (r.getWAL() == null) return;
349    r.getWAL().close();
350  }
351
352  /**
353   * Returns this classes's instance of {@link Configuration}. Be careful how you use the returned
354   * Configuration since {@link Connection} instances can be shared. The Map of Connections is keyed
355   * by the Configuration. If say, a Connection was being used against a cluster that had been
356   * shutdown, see {@link #shutdownMiniCluster()}, then the Connection will no longer be wholesome.
357   * Rather than use the return direct, its usually best to make a copy and use that. Do
358   * <code>Configuration c = new Configuration(INSTANCE.getConfiguration());</code>
359   * @return Instance of Configuration.
360   */
361  @Override
362  public Configuration getConfiguration() {
363    return super.getConfiguration();
364  }
365
366  public void setHBaseCluster(HBaseCluster hbaseCluster) {
367    this.hbaseCluster = hbaseCluster;
368  }
369
370  /**
371   * Home our data in a dir under {@link #DEFAULT_BASE_TEST_DIRECTORY}. Give it a random name so can
372   * have many concurrent tests running if we need to. It needs to amend the
373   * {@link #TEST_DIRECTORY_KEY} System property, as it's what minidfscluster bases it data dir on.
374   * Moding a System property is not the way to do concurrent instances -- another instance could
375   * grab the temporary value unintentionally -- but not anything can do about it at moment; single
376   * instance only is how the minidfscluster works. We also create the underlying directory names
377   * for hadoop.log.dir, mapreduce.cluster.local.dir and hadoop.tmp.dir, and set the values in the
378   * conf, and as a system property for hadoop.tmp.dir (We do not create them!).
379   * @return The calculated data test build directory, if newly-created.
380   */
381  @Override
382  protected Path setupDataTestDir() {
383    Path testPath = super.setupDataTestDir();
384    if (null == testPath) {
385      return null;
386    }
387
388    createSubDirAndSystemProperty("hadoop.log.dir", testPath, "hadoop-log-dir");
389
390    // This is defaulted in core-default.xml to /tmp/hadoop-${user.name}, but
391    // we want our own value to ensure uniqueness on the same machine
392    createSubDirAndSystemProperty("hadoop.tmp.dir", testPath, "hadoop-tmp-dir");
393
394    // Read and modified in org.apache.hadoop.mapred.MiniMRCluster
395    createSubDir("mapreduce.cluster.local.dir", testPath, "mapred-local-dir");
396    return testPath;
397  }
398
399  private void createSubDirAndSystemProperty(String propertyName, Path parent, String subDirName) {
400
401    String sysValue = System.getProperty(propertyName);
402
403    if (sysValue != null) {
404      // There is already a value set. So we do nothing but hope
405      // that there will be no conflicts
406      LOG.info("System.getProperty(\"" + propertyName + "\") already set to: " + sysValue
407        + " so I do NOT create it in " + parent);
408      String confValue = conf.get(propertyName);
409      if (confValue != null && !confValue.endsWith(sysValue)) {
410        LOG.warn(propertyName + " property value differs in configuration and system: "
411          + "Configuration=" + confValue + " while System=" + sysValue
412          + " Erasing configuration value by system value.");
413      }
414      conf.set(propertyName, sysValue);
415    } else {
416      // Ok, it's not set, so we create it as a subdirectory
417      createSubDir(propertyName, parent, subDirName);
418      System.setProperty(propertyName, conf.get(propertyName));
419    }
420  }
421
422  /**
423   * @return Where to write test data on the test filesystem; Returns working directory for the test
424   *         filesystem by default
425   * @see #setupDataTestDirOnTestFS()
426   * @see #getTestFileSystem()
427   */
428  private Path getBaseTestDirOnTestFS() throws IOException {
429    FileSystem fs = getTestFileSystem();
430    return new Path(fs.getWorkingDirectory(), "test-data");
431  }
432
433  /**
434   * Returns a Path in the test filesystem, obtained from {@link #getTestFileSystem()} to write
435   * temporary test data. Call this method after setting up the mini dfs cluster if the test relies
436   * on it.
437   * @return a unique path in the test filesystem
438   */
439  public Path getDataTestDirOnTestFS() throws IOException {
440    if (dataTestDirOnTestFS == null) {
441      setupDataTestDirOnTestFS();
442    }
443
444    return dataTestDirOnTestFS;
445  }
446
447  /**
448   * Returns a Path in the test filesystem, obtained from {@link #getTestFileSystem()} to write
449   * temporary test data. Call this method after setting up the mini dfs cluster if the test relies
450   * on it.
451   * @return a unique path in the test filesystem
452   * @param subdirName name of the subdir to create under the base test dir
453   */
454  public Path getDataTestDirOnTestFS(final String subdirName) throws IOException {
455    return new Path(getDataTestDirOnTestFS(), subdirName);
456  }
457
458  /**
459   * Sets up a path in test filesystem to be used by tests. Creates a new directory if not already
460   * setup.
461   */
462  private void setupDataTestDirOnTestFS() throws IOException {
463    if (dataTestDirOnTestFS != null) {
464      LOG.warn("Data test on test fs dir already setup in " + dataTestDirOnTestFS.toString());
465      return;
466    }
467    dataTestDirOnTestFS = getNewDataTestDirOnTestFS();
468  }
469
470  /**
471   * Sets up a new path in test filesystem to be used by tests.
472   */
473  private Path getNewDataTestDirOnTestFS() throws IOException {
474    // The file system can be either local, mini dfs, or if the configuration
475    // is supplied externally, it can be an external cluster FS. If it is a local
476    // file system, the tests should use getBaseTestDir, otherwise, we can use
477    // the working directory, and create a unique sub dir there
478    FileSystem fs = getTestFileSystem();
479    Path newDataTestDir;
480    String randomStr = getRandomUUID().toString();
481    if (fs.getUri().getScheme().equals(FileSystem.getLocal(conf).getUri().getScheme())) {
482      newDataTestDir = new Path(getDataTestDir(), randomStr);
483      File dataTestDir = new File(newDataTestDir.toString());
484      if (deleteOnExit()) dataTestDir.deleteOnExit();
485    } else {
486      Path base = getBaseTestDirOnTestFS();
487      newDataTestDir = new Path(base, randomStr);
488      if (deleteOnExit()) fs.deleteOnExit(newDataTestDir);
489    }
490    return newDataTestDir;
491  }
492
493  /**
494   * Cleans the test data directory on the test filesystem.
495   * @return True if we removed the test dirs n
496   */
497  public boolean cleanupDataTestDirOnTestFS() throws IOException {
498    boolean ret = getTestFileSystem().delete(dataTestDirOnTestFS, true);
499    if (ret) dataTestDirOnTestFS = null;
500    return ret;
501  }
502
503  /**
504   * Cleans a subdirectory under the test data directory on the test filesystem.
505   * @return True if we removed child n
506   */
507  public boolean cleanupDataTestDirOnTestFS(String subdirName) throws IOException {
508    Path cpath = getDataTestDirOnTestFS(subdirName);
509    return getTestFileSystem().delete(cpath, true);
510  }
511
512  /**
513   * Start a minidfscluster.
514   * @param servers How many DNs to start. n * @see #shutdownMiniDFSCluster()
515   * @return The mini dfs cluster created.
516   */
517  public MiniDFSCluster startMiniDFSCluster(int servers) throws Exception {
518    return startMiniDFSCluster(servers, null);
519  }
520
521  /**
522   * Start a minidfscluster. This is useful if you want to run datanode on distinct hosts for things
523   * like HDFS block location verification. If you start MiniDFSCluster without host names, all
524   * instances of the datanodes will have the same host name.
525   * @param hosts hostnames DNs to run on. n * @see #shutdownMiniDFSCluster()
526   * @return The mini dfs cluster created.
527   */
528  public MiniDFSCluster startMiniDFSCluster(final String hosts[]) throws Exception {
529    if (hosts != null && hosts.length != 0) {
530      return startMiniDFSCluster(hosts.length, hosts);
531    } else {
532      return startMiniDFSCluster(1, null);
533    }
534  }
535
536  /**
537   * Start a minidfscluster. Can only create one.
538   * @param servers How many DNs to start.
539   * @param hosts   hostnames DNs to run on. n * @see #shutdownMiniDFSCluster()
540   * @return The mini dfs cluster created.
541   */
542  public MiniDFSCluster startMiniDFSCluster(int servers, final String hosts[]) throws Exception {
543    return startMiniDFSCluster(servers, null, hosts);
544  }
545
546  private void setFs() throws IOException {
547    if (this.dfsCluster == null) {
548      LOG.info("Skipping setting fs because dfsCluster is null");
549      return;
550    }
551    FileSystem fs = this.dfsCluster.getFileSystem();
552    CommonFSUtils.setFsDefault(this.conf, new Path(fs.getUri()));
553
554    // re-enable this check with dfs
555    conf.unset(CommonFSUtils.UNSAFE_STREAM_CAPABILITY_ENFORCE);
556  }
557
558  public MiniDFSCluster startMiniDFSCluster(int servers, final String racks[], String hosts[])
559    throws Exception {
560    createDirsAndSetProperties();
561    EditLogFileOutputStream.setShouldSkipFsyncForTesting(true);
562
563    // Error level to skip some warnings specific to the minicluster. See HBASE-4709
564    Log4jUtils.setLogLevel(org.apache.hadoop.metrics2.util.MBeans.class.getName(), "ERROR");
565    Log4jUtils.setLogLevel(org.apache.hadoop.metrics2.impl.MetricsSystemImpl.class.getName(),
566      "ERROR");
567
568    this.dfsCluster =
569      new MiniDFSCluster(0, this.conf, servers, true, true, true, null, racks, hosts, null);
570
571    // Set this just-started cluster as our filesystem.
572    setFs();
573
574    // Wait for the cluster to be totally up
575    this.dfsCluster.waitClusterUp();
576
577    // reset the test directory for test file system
578    dataTestDirOnTestFS = null;
579    String dataTestDir = getDataTestDir().toString();
580    conf.set(HConstants.HBASE_DIR, dataTestDir);
581    LOG.debug("Setting {} to {}", HConstants.HBASE_DIR, dataTestDir);
582
583    return this.dfsCluster;
584  }
585
586  public MiniDFSCluster startMiniDFSClusterForTestWAL(int namenodePort) throws IOException {
587    createDirsAndSetProperties();
588    // Error level to skip some warnings specific to the minicluster. See HBASE-4709
589    Log4jUtils.setLogLevel(org.apache.hadoop.metrics2.util.MBeans.class.getName(), "ERROR");
590    Log4jUtils.setLogLevel(org.apache.hadoop.metrics2.impl.MetricsSystemImpl.class.getName(),
591      "ERROR");
592    dfsCluster =
593      new MiniDFSCluster(namenodePort, conf, 5, false, true, true, null, null, null, null);
594    return dfsCluster;
595  }
596
597  /**
598   * This is used before starting HDFS and map-reduce mini-clusters Run something like the below to
599   * check for the likes of '/tmp' references -- i.e. references outside of the test data dir -- in
600   * the conf.
601   *
602   * <pre>
603   * Configuration conf = TEST_UTIL.getConfiguration();
604   * for (Iterator&lt;Map.Entry&lt;String, String&gt;&gt; i = conf.iterator(); i.hasNext();) {
605   *   Map.Entry&lt;String, String&gt; e = i.next();
606   *   assertFalse(e.getKey() + " " + e.getValue(), e.getValue().contains("/tmp"));
607   * }
608   * </pre>
609   */
610  private void createDirsAndSetProperties() throws IOException {
611    setupClusterTestDir();
612    conf.set(TEST_DIRECTORY_KEY, clusterTestDir.getPath());
613    System.setProperty(TEST_DIRECTORY_KEY, clusterTestDir.getPath());
614    createDirAndSetProperty("test.cache.data");
615    createDirAndSetProperty("hadoop.tmp.dir");
616    hadoopLogDir = createDirAndSetProperty("hadoop.log.dir");
617    createDirAndSetProperty("mapreduce.cluster.local.dir");
618    createDirAndSetProperty("mapreduce.cluster.temp.dir");
619    enableShortCircuit();
620
621    Path root = getDataTestDirOnTestFS("hadoop");
622    conf.set(MapreduceTestingShim.getMROutputDirProp(),
623      new Path(root, "mapred-output-dir").toString());
624    conf.set("mapreduce.jobtracker.system.dir", new Path(root, "mapred-system-dir").toString());
625    conf.set("mapreduce.jobtracker.staging.root.dir",
626      new Path(root, "mapreduce-jobtracker-staging-root-dir").toString());
627    conf.set("mapreduce.job.working.dir", new Path(root, "mapred-working-dir").toString());
628    conf.set("yarn.app.mapreduce.am.staging-dir",
629      new Path(root, "mapreduce-am-staging-root-dir").toString());
630
631    // Frustrate yarn's and hdfs's attempts at writing /tmp.
632    // Below is fragile. Make it so we just interpolate any 'tmp' reference.
633    createDirAndSetProperty("yarn.node-labels.fs-store.root-dir");
634    createDirAndSetProperty("yarn.node-attribute.fs-store.root-dir");
635    createDirAndSetProperty("yarn.nodemanager.log-dirs");
636    createDirAndSetProperty("yarn.nodemanager.remote-app-log-dir");
637    createDirAndSetProperty("yarn.timeline-service.entity-group-fs-store.active-dir");
638    createDirAndSetProperty("yarn.timeline-service.entity-group-fs-store.done-dir");
639    createDirAndSetProperty("yarn.nodemanager.remote-app-log-dir");
640    createDirAndSetProperty("dfs.journalnode.edits.dir");
641    createDirAndSetProperty("dfs.datanode.shared.file.descriptor.paths");
642    createDirAndSetProperty("nfs.dump.dir");
643    createDirAndSetProperty("java.io.tmpdir");
644    createDirAndSetProperty("dfs.journalnode.edits.dir");
645    createDirAndSetProperty("dfs.provided.aliasmap.inmemory.leveldb.dir");
646    createDirAndSetProperty("fs.s3a.committer.staging.tmp.path");
647  }
648
649  /**
650   * Check whether the tests should assume NEW_VERSION_BEHAVIOR when creating new column families.
651   * Default to false.
652   */
653  public boolean isNewVersionBehaviorEnabled() {
654    final String propName = "hbase.tests.new.version.behavior";
655    String v = System.getProperty(propName);
656    if (v != null) {
657      return Boolean.parseBoolean(v);
658    }
659    return false;
660  }
661
662  /**
663   * Get the HBase setting for dfs.client.read.shortcircuit from the conf or a system property. This
664   * allows to specify this parameter on the command line. If not set, default is true.
665   */
666  public boolean isReadShortCircuitOn() {
667    final String propName = "hbase.tests.use.shortcircuit.reads";
668    String readOnProp = System.getProperty(propName);
669    if (readOnProp != null) {
670      return Boolean.parseBoolean(readOnProp);
671    } else {
672      return conf.getBoolean(propName, false);
673    }
674  }
675
676  /**
677   * Enable the short circuit read, unless configured differently. Set both HBase and HDFS settings,
678   * including skipping the hdfs checksum checks.
679   */
680  private void enableShortCircuit() {
681    if (isReadShortCircuitOn()) {
682      String curUser = System.getProperty("user.name");
683      LOG.info("read short circuit is ON for user " + curUser);
684      // read short circuit, for hdfs
685      conf.set("dfs.block.local-path-access.user", curUser);
686      // read short circuit, for hbase
687      conf.setBoolean("dfs.client.read.shortcircuit", true);
688      // Skip checking checksum, for the hdfs client and the datanode
689      conf.setBoolean("dfs.client.read.shortcircuit.skip.checksum", true);
690    } else {
691      LOG.info("read short circuit is OFF");
692    }
693  }
694
695  private String createDirAndSetProperty(final String property) {
696    return createDirAndSetProperty(property, property);
697  }
698
699  private String createDirAndSetProperty(final String relPath, String property) {
700    String path = getDataTestDir(relPath).toString();
701    System.setProperty(property, path);
702    conf.set(property, path);
703    new File(path).mkdirs();
704    LOG.info("Setting " + property + " to " + path + " in system properties and HBase conf");
705    return path;
706  }
707
708  /**
709   * Shuts down instance created by call to {@link #startMiniDFSCluster(int)} or does nothing. n
710   */
711  public void shutdownMiniDFSCluster() throws IOException {
712    if (this.dfsCluster != null) {
713      // The below throws an exception per dn, AsynchronousCloseException.
714      this.dfsCluster.shutdown();
715      dfsCluster = null;
716      dataTestDirOnTestFS = null;
717      CommonFSUtils.setFsDefault(this.conf, new Path("file:///"));
718    }
719  }
720
721  /**
722   * Start up a minicluster of hbase, dfs, and zookeeper where WAL's walDir is created separately.
723   * All other options will use default values, defined in {@link StartMiniClusterOption.Builder}.
724   * @param createWALDir Whether to create a new WAL directory.
725   * @return The mini HBase cluster created.
726   * @see #shutdownMiniCluster()
727   * @deprecated since 2.2.0 and will be removed in 4.0.0. Use
728   *             {@link #startMiniCluster(StartMiniClusterOption)} instead.
729   * @see #startMiniCluster(StartMiniClusterOption)
730   * @see <a href="https://issues.apache.org/jira/browse/HBASE-21071">HBASE-21071</a>
731   */
732  @Deprecated
733  public MiniHBaseCluster startMiniCluster(boolean createWALDir) throws Exception {
734    StartMiniClusterOption option =
735      StartMiniClusterOption.builder().createWALDir(createWALDir).build();
736    return startMiniCluster(option);
737  }
738
739  /**
740   * Start up a minicluster of hbase, dfs, and zookeeper. All other options will use default values,
741   * defined in {@link StartMiniClusterOption.Builder}.
742   * @param numSlaves     Slave node number, for both HBase region server and HDFS data node.
743   * @param createRootDir Whether to create a new root or data directory path.
744   * @return The mini HBase cluster created.
745   * @see #shutdownMiniCluster()
746   * @deprecated since 2.2.0 and will be removed in 4.0.0. Use
747   *             {@link #startMiniCluster(StartMiniClusterOption)} instead.
748   * @see #startMiniCluster(StartMiniClusterOption)
749   * @see <a href="https://issues.apache.org/jira/browse/HBASE-21071">HBASE-21071</a>
750   */
751  @Deprecated
752  public MiniHBaseCluster startMiniCluster(int numSlaves, boolean createRootDir) throws Exception {
753    StartMiniClusterOption option = StartMiniClusterOption.builder().numRegionServers(numSlaves)
754      .numDataNodes(numSlaves).createRootDir(createRootDir).build();
755    return startMiniCluster(option);
756  }
757
758  /**
759   * Start up a minicluster of hbase, dfs, and zookeeper. All other options will use default values,
760   * defined in {@link StartMiniClusterOption.Builder}.
761   * @param numSlaves     Slave node number, for both HBase region server and HDFS data node.
762   * @param createRootDir Whether to create a new root or data directory path.
763   * @param createWALDir  Whether to create a new WAL directory.
764   * @return The mini HBase cluster created.
765   * @see #shutdownMiniCluster()
766   * @deprecated since 2.2.0 and will be removed in 4.0.0. Use
767   *             {@link #startMiniCluster(StartMiniClusterOption)} instead.
768   * @see #startMiniCluster(StartMiniClusterOption)
769   * @see <a href="https://issues.apache.org/jira/browse/HBASE-21071">HBASE-21071</a>
770   */
771  @Deprecated
772  public MiniHBaseCluster startMiniCluster(int numSlaves, boolean createRootDir,
773    boolean createWALDir) throws Exception {
774    StartMiniClusterOption option = StartMiniClusterOption.builder().numRegionServers(numSlaves)
775      .numDataNodes(numSlaves).createRootDir(createRootDir).createWALDir(createWALDir).build();
776    return startMiniCluster(option);
777  }
778
779  /**
780   * Start up a minicluster of hbase, dfs, and zookeeper. All other options will use default values,
781   * defined in {@link StartMiniClusterOption.Builder}.
782   * @param numMasters    Master node number.
783   * @param numSlaves     Slave node number, for both HBase region server and HDFS data node.
784   * @param createRootDir Whether to create a new root or data directory path.
785   * @return The mini HBase cluster created.
786   * @see #shutdownMiniCluster()
787   * @deprecated since 2.2.0 and will be removed in 4.0.0. Use
788   *             {@link #startMiniCluster(StartMiniClusterOption)} instead.
789   * @see #startMiniCluster(StartMiniClusterOption)
790   * @see <a href="https://issues.apache.org/jira/browse/HBASE-21071">HBASE-21071</a>
791   */
792  @Deprecated
793  public MiniHBaseCluster startMiniCluster(int numMasters, int numSlaves, boolean createRootDir)
794    throws Exception {
795    StartMiniClusterOption option = StartMiniClusterOption.builder().numMasters(numMasters)
796      .numRegionServers(numSlaves).createRootDir(createRootDir).numDataNodes(numSlaves).build();
797    return startMiniCluster(option);
798  }
799
800  /**
801   * Start up a minicluster of hbase, dfs, and zookeeper. All other options will use default values,
802   * defined in {@link StartMiniClusterOption.Builder}.
803   * @param numMasters Master node number.
804   * @param numSlaves  Slave node number, for both HBase region server and HDFS data node.
805   * @return The mini HBase cluster created.
806   * @see #shutdownMiniCluster()
807   * @deprecated since 2.2.0 and will be removed in 4.0.0. Use
808   *             {@link #startMiniCluster(StartMiniClusterOption)} instead.
809   * @see #startMiniCluster(StartMiniClusterOption)
810   * @see <a href="https://issues.apache.org/jira/browse/HBASE-21071">HBASE-21071</a>
811   */
812  @Deprecated
813  public MiniHBaseCluster startMiniCluster(int numMasters, int numSlaves) throws Exception {
814    StartMiniClusterOption option = StartMiniClusterOption.builder().numMasters(numMasters)
815      .numRegionServers(numSlaves).numDataNodes(numSlaves).build();
816    return startMiniCluster(option);
817  }
818
819  /**
820   * Start up a minicluster of hbase, dfs, and zookeeper. All other options will use default values,
821   * defined in {@link StartMiniClusterOption.Builder}.
822   * @param numMasters    Master node number.
823   * @param numSlaves     Slave node number, for both HBase region server and HDFS data node.
824   * @param dataNodeHosts The hostnames of DataNodes to run on. If not null, its size will overwrite
825   *                      HDFS data node number.
826   * @param createRootDir Whether to create a new root or data directory path.
827   * @return The mini HBase cluster created.
828   * @see #shutdownMiniCluster()
829   * @deprecated since 2.2.0 and will be removed in 4.0.0. Use
830   *             {@link #startMiniCluster(StartMiniClusterOption)} instead.
831   * @see #startMiniCluster(StartMiniClusterOption)
832   * @see <a href="https://issues.apache.org/jira/browse/HBASE-21071">HBASE-21071</a>
833   */
834  @Deprecated
835  public MiniHBaseCluster startMiniCluster(int numMasters, int numSlaves, String[] dataNodeHosts,
836    boolean createRootDir) throws Exception {
837    StartMiniClusterOption option =
838      StartMiniClusterOption.builder().numMasters(numMasters).numRegionServers(numSlaves)
839        .createRootDir(createRootDir).numDataNodes(numSlaves).dataNodeHosts(dataNodeHosts).build();
840    return startMiniCluster(option);
841  }
842
843  /**
844   * Start up a minicluster of hbase, dfs, and zookeeper. All other options will use default values,
845   * defined in {@link StartMiniClusterOption.Builder}.
846   * @param numMasters    Master node number.
847   * @param numSlaves     Slave node number, for both HBase region server and HDFS data node.
848   * @param dataNodeHosts The hostnames of DataNodes to run on. If not null, its size will overwrite
849   *                      HDFS data node number.
850   * @return The mini HBase cluster created.
851   * @see #shutdownMiniCluster()
852   * @deprecated since 2.2.0 and will be removed in 4.0.0. Use
853   *             {@link #startMiniCluster(StartMiniClusterOption)} instead.
854   * @see #startMiniCluster(StartMiniClusterOption)
855   * @see <a href="https://issues.apache.org/jira/browse/HBASE-21071">HBASE-21071</a>
856   */
857  @Deprecated
858  public MiniHBaseCluster startMiniCluster(int numMasters, int numSlaves, String[] dataNodeHosts)
859    throws Exception {
860    StartMiniClusterOption option = StartMiniClusterOption.builder().numMasters(numMasters)
861      .numRegionServers(numSlaves).numDataNodes(numSlaves).dataNodeHosts(dataNodeHosts).build();
862    return startMiniCluster(option);
863  }
864
865  /**
866   * Start up a minicluster of hbase, dfs, and zookeeper. All other options will use default values,
867   * defined in {@link StartMiniClusterOption.Builder}.
868   * @param numMasters       Master node number.
869   * @param numRegionServers Number of region servers.
870   * @param numDataNodes     Number of datanodes.
871   * @return The mini HBase cluster created.
872   * @see #shutdownMiniCluster()
873   * @deprecated since 2.2.0 and will be removed in 4.0.0. Use
874   *             {@link #startMiniCluster(StartMiniClusterOption)} instead.
875   * @see #startMiniCluster(StartMiniClusterOption)
876   * @see <a href="https://issues.apache.org/jira/browse/HBASE-21071">HBASE-21071</a>
877   */
878  @Deprecated
879  public MiniHBaseCluster startMiniCluster(int numMasters, int numRegionServers, int numDataNodes)
880    throws Exception {
881    StartMiniClusterOption option = StartMiniClusterOption.builder().numMasters(numMasters)
882      .numRegionServers(numRegionServers).numDataNodes(numDataNodes).build();
883    return startMiniCluster(option);
884  }
885
886  /**
887   * Start up a minicluster of hbase, dfs, and zookeeper. All other options will use default values,
888   * defined in {@link StartMiniClusterOption.Builder}.
889   * @param numMasters    Master node number.
890   * @param numSlaves     Slave node number, for both HBase region server and HDFS data node.
891   * @param dataNodeHosts The hostnames of DataNodes to run on. If not null, its size will overwrite
892   *                      HDFS data node number.
893   * @param masterClass   The class to use as HMaster, or null for default.
894   * @param rsClass       The class to use as HRegionServer, or null for default.
895   * @return The mini HBase cluster created.
896   * @see #shutdownMiniCluster()
897   * @deprecated since 2.2.0 and will be removed in 4.0.0. Use
898   *             {@link #startMiniCluster(StartMiniClusterOption)} instead.
899   * @see #startMiniCluster(StartMiniClusterOption)
900   * @see <a href="https://issues.apache.org/jira/browse/HBASE-21071">HBASE-21071</a>
901   */
902  @Deprecated
903  public MiniHBaseCluster startMiniCluster(int numMasters, int numSlaves, String[] dataNodeHosts,
904    Class<? extends HMaster> masterClass,
905    Class<? extends MiniHBaseCluster.MiniHBaseClusterRegionServer> rsClass) throws Exception {
906    StartMiniClusterOption option = StartMiniClusterOption.builder().numMasters(numMasters)
907      .masterClass(masterClass).numRegionServers(numSlaves).rsClass(rsClass).numDataNodes(numSlaves)
908      .dataNodeHosts(dataNodeHosts).build();
909    return startMiniCluster(option);
910  }
911
912  /**
913   * Start up a minicluster of hbase, dfs, and zookeeper. All other options will use default values,
914   * defined in {@link StartMiniClusterOption.Builder}.
915   * @param numMasters       Master node number.
916   * @param numRegionServers Number of region servers.
917   * @param numDataNodes     Number of datanodes.
918   * @param dataNodeHosts    The hostnames of DataNodes to run on. If not null, its size will
919   *                         overwrite HDFS data node number.
920   * @param masterClass      The class to use as HMaster, or null for default.
921   * @param rsClass          The class to use as HRegionServer, or null for default.
922   * @return The mini HBase cluster created.
923   * @see #shutdownMiniCluster()
924   * @deprecated since 2.2.0 and will be removed in 4.0.0. Use
925   *             {@link #startMiniCluster(StartMiniClusterOption)} instead.
926   * @see #startMiniCluster(StartMiniClusterOption)
927   * @see <a href="https://issues.apache.org/jira/browse/HBASE-21071">HBASE-21071</a>
928   */
929  @Deprecated
930  public MiniHBaseCluster startMiniCluster(int numMasters, int numRegionServers, int numDataNodes,
931    String[] dataNodeHosts, Class<? extends HMaster> masterClass,
932    Class<? extends MiniHBaseCluster.MiniHBaseClusterRegionServer> rsClass) throws Exception {
933    StartMiniClusterOption option = StartMiniClusterOption.builder().numMasters(numMasters)
934      .masterClass(masterClass).numRegionServers(numRegionServers).rsClass(rsClass)
935      .numDataNodes(numDataNodes).dataNodeHosts(dataNodeHosts).build();
936    return startMiniCluster(option);
937  }
938
939  /**
940   * Start up a minicluster of hbase, dfs, and zookeeper. All other options will use default values,
941   * defined in {@link StartMiniClusterOption.Builder}.
942   * @param numMasters       Master node number.
943   * @param numRegionServers Number of region servers.
944   * @param numDataNodes     Number of datanodes.
945   * @param dataNodeHosts    The hostnames of DataNodes to run on. If not null, its size will
946   *                         overwrite HDFS data node number.
947   * @param masterClass      The class to use as HMaster, or null for default.
948   * @param rsClass          The class to use as HRegionServer, or null for default.
949   * @param createRootDir    Whether to create a new root or data directory path.
950   * @param createWALDir     Whether to create a new WAL directory.
951   * @return The mini HBase cluster created.
952   * @see #shutdownMiniCluster()
953   * @deprecated since 2.2.0 and will be removed in 4.0.0. Use
954   *             {@link #startMiniCluster(StartMiniClusterOption)} instead.
955   * @see #startMiniCluster(StartMiniClusterOption)
956   * @see <a href="https://issues.apache.org/jira/browse/HBASE-21071">HBASE-21071</a>
957   */
958  @Deprecated
959  public MiniHBaseCluster startMiniCluster(int numMasters, int numRegionServers, int numDataNodes,
960    String[] dataNodeHosts, Class<? extends HMaster> masterClass,
961    Class<? extends MiniHBaseCluster.MiniHBaseClusterRegionServer> rsClass, boolean createRootDir,
962    boolean createWALDir) throws Exception {
963    StartMiniClusterOption option = StartMiniClusterOption.builder().numMasters(numMasters)
964      .masterClass(masterClass).numRegionServers(numRegionServers).rsClass(rsClass)
965      .numDataNodes(numDataNodes).dataNodeHosts(dataNodeHosts).createRootDir(createRootDir)
966      .createWALDir(createWALDir).build();
967    return startMiniCluster(option);
968  }
969
970  /**
971   * Start up a minicluster of hbase, dfs and zookeeper clusters with given slave node number. All
972   * other options will use default values, defined in {@link StartMiniClusterOption.Builder}.
973   * @param numSlaves slave node number, for both HBase region server and HDFS data node.
974   * @see #startMiniCluster(StartMiniClusterOption option)
975   * @see #shutdownMiniDFSCluster()
976   */
977  public MiniHBaseCluster startMiniCluster(int numSlaves) throws Exception {
978    StartMiniClusterOption option =
979      StartMiniClusterOption.builder().numRegionServers(numSlaves).numDataNodes(numSlaves).build();
980    return startMiniCluster(option);
981  }
982
983  /**
984   * Start up a minicluster of hbase, dfs and zookeeper all using default options. Option default
985   * value can be found in {@link StartMiniClusterOption.Builder}.
986   * @see #startMiniCluster(StartMiniClusterOption option)
987   * @see #shutdownMiniDFSCluster()
988   */
989  public MiniHBaseCluster startMiniCluster() throws Exception {
990    return startMiniCluster(StartMiniClusterOption.builder().build());
991  }
992
993  /**
994   * Start up a mini cluster of hbase, optionally dfs and zookeeper if needed. It modifies
995   * Configuration. It homes the cluster data directory under a random subdirectory in a directory
996   * under System property test.build.data, to be cleaned up on exit.
997   * @see #shutdownMiniDFSCluster()
998   */
999  public MiniHBaseCluster startMiniCluster(StartMiniClusterOption option) throws Exception {
1000    LOG.info("Starting up minicluster with option: {}", option);
1001
1002    // If we already put up a cluster, fail.
1003    if (miniClusterRunning) {
1004      throw new IllegalStateException("A mini-cluster is already running");
1005    }
1006    miniClusterRunning = true;
1007
1008    setupClusterTestDir();
1009    System.setProperty(TEST_DIRECTORY_KEY, this.clusterTestDir.getPath());
1010
1011    // Bring up mini dfs cluster. This spews a bunch of warnings about missing
1012    // scheme. Complaints are 'Scheme is undefined for build/test/data/dfs/name1'.
1013    if (dfsCluster == null) {
1014      LOG.info("STARTING DFS");
1015      dfsCluster = startMiniDFSCluster(option.getNumDataNodes(), option.getDataNodeHosts());
1016    } else {
1017      LOG.info("NOT STARTING DFS");
1018    }
1019
1020    // Start up a zk cluster.
1021    if (getZkCluster() == null) {
1022      startMiniZKCluster(option.getNumZkServers());
1023    }
1024
1025    // Start the MiniHBaseCluster
1026    return startMiniHBaseCluster(option);
1027  }
1028
1029  /**
1030   * Starts up mini hbase cluster. Usually you won't want this. You'll usually want
1031   * {@link #startMiniCluster()}. This is useful when doing stepped startup of clusters.
1032   * @return Reference to the hbase mini hbase cluster.
1033   * @see #startMiniCluster(StartMiniClusterOption)
1034   * @see #shutdownMiniHBaseCluster()
1035   */
1036  public MiniHBaseCluster startMiniHBaseCluster(StartMiniClusterOption option)
1037    throws IOException, InterruptedException {
1038    // Now do the mini hbase cluster. Set the hbase.rootdir in config.
1039    createRootDir(option.isCreateRootDir());
1040    if (option.isCreateWALDir()) {
1041      createWALRootDir();
1042    }
1043    // Set the hbase.fs.tmp.dir config to make sure that we have some default value. This is
1044    // for tests that do not read hbase-defaults.xml
1045    setHBaseFsTmpDir();
1046
1047    // These settings will make the server waits until this exact number of
1048    // regions servers are connected.
1049    if (conf.getInt(ServerManager.WAIT_ON_REGIONSERVERS_MINTOSTART, -1) == -1) {
1050      conf.setInt(ServerManager.WAIT_ON_REGIONSERVERS_MINTOSTART, option.getNumRegionServers());
1051    }
1052    if (conf.getInt(ServerManager.WAIT_ON_REGIONSERVERS_MAXTOSTART, -1) == -1) {
1053      conf.setInt(ServerManager.WAIT_ON_REGIONSERVERS_MAXTOSTART, option.getNumRegionServers());
1054    }
1055
1056    // Avoid log flooded with chore execution time, see HBASE-24646 for more details.
1057    Log4jUtils.setLogLevel(org.apache.hadoop.hbase.ScheduledChore.class.getName(), "INFO");
1058
1059    Configuration c = new Configuration(this.conf);
1060    this.hbaseCluster = new MiniHBaseCluster(c, option.getNumMasters(),
1061      option.getNumAlwaysStandByMasters(), option.getNumRegionServers(), option.getRsPorts(),
1062      option.getMasterClass(), option.getRsClass());
1063    // Populate the master address configuration from mini cluster configuration.
1064    conf.set(HConstants.MASTER_ADDRS_KEY, MasterRegistry.getMasterAddr(c));
1065    // Don't leave here till we've done a successful scan of the hbase:meta
1066    try (Table t = getConnection().getTable(TableName.META_TABLE_NAME);
1067      ResultScanner s = t.getScanner(new Scan())) {
1068      for (;;) {
1069        if (s.next() == null) {
1070          break;
1071        }
1072      }
1073    }
1074
1075    getAdmin(); // create immediately the hbaseAdmin
1076    LOG.info("Minicluster is up; activeMaster={}", getHBaseCluster().getMaster());
1077
1078    return (MiniHBaseCluster) hbaseCluster;
1079  }
1080
1081  /**
1082   * Starts up mini hbase cluster using default options. Default options can be found in
1083   * {@link StartMiniClusterOption.Builder}.
1084   * @see #startMiniHBaseCluster(StartMiniClusterOption)
1085   * @see #shutdownMiniHBaseCluster()
1086   */
1087  public MiniHBaseCluster startMiniHBaseCluster() throws IOException, InterruptedException {
1088    return startMiniHBaseCluster(StartMiniClusterOption.builder().build());
1089  }
1090
1091  /**
1092   * Starts up mini hbase cluster. Usually you won't want this. You'll usually want
1093   * {@link #startMiniCluster()}. All other options will use default values, defined in
1094   * {@link StartMiniClusterOption.Builder}.
1095   * @param numMasters       Master node number.
1096   * @param numRegionServers Number of region servers.
1097   * @return The mini HBase cluster created.
1098   * @see #shutdownMiniHBaseCluster()
1099   * @deprecated since 2.2.0 and will be removed in 4.0.0. Use
1100   *             {@link #startMiniHBaseCluster(StartMiniClusterOption)} instead.
1101   * @see #startMiniHBaseCluster(StartMiniClusterOption)
1102   * @see <a href="https://issues.apache.org/jira/browse/HBASE-21071">HBASE-21071</a>
1103   */
1104  @Deprecated
1105  public MiniHBaseCluster startMiniHBaseCluster(int numMasters, int numRegionServers)
1106    throws IOException, InterruptedException {
1107    StartMiniClusterOption option = StartMiniClusterOption.builder().numMasters(numMasters)
1108      .numRegionServers(numRegionServers).build();
1109    return startMiniHBaseCluster(option);
1110  }
1111
1112  /**
1113   * Starts up mini hbase cluster. Usually you won't want this. You'll usually want
1114   * {@link #startMiniCluster()}. All other options will use default values, defined in
1115   * {@link StartMiniClusterOption.Builder}.
1116   * @param numMasters       Master node number.
1117   * @param numRegionServers Number of region servers.
1118   * @param rsPorts          Ports that RegionServer should use.
1119   * @return The mini HBase cluster created.
1120   * @see #shutdownMiniHBaseCluster()
1121   * @deprecated since 2.2.0 and will be removed in 4.0.0. Use
1122   *             {@link #startMiniHBaseCluster(StartMiniClusterOption)} instead.
1123   * @see #startMiniHBaseCluster(StartMiniClusterOption)
1124   * @see <a href="https://issues.apache.org/jira/browse/HBASE-21071">HBASE-21071</a>
1125   */
1126  @Deprecated
1127  public MiniHBaseCluster startMiniHBaseCluster(int numMasters, int numRegionServers,
1128    List<Integer> rsPorts) throws IOException, InterruptedException {
1129    StartMiniClusterOption option = StartMiniClusterOption.builder().numMasters(numMasters)
1130      .numRegionServers(numRegionServers).rsPorts(rsPorts).build();
1131    return startMiniHBaseCluster(option);
1132  }
1133
1134  /**
1135   * Starts up mini hbase cluster. Usually you won't want this. You'll usually want
1136   * {@link #startMiniCluster()}. All other options will use default values, defined in
1137   * {@link StartMiniClusterOption.Builder}.
1138   * @param numMasters       Master node number.
1139   * @param numRegionServers Number of region servers.
1140   * @param rsPorts          Ports that RegionServer should use.
1141   * @param masterClass      The class to use as HMaster, or null for default.
1142   * @param rsClass          The class to use as HRegionServer, or null for default.
1143   * @param createRootDir    Whether to create a new root or data directory path.
1144   * @param createWALDir     Whether to create a new WAL directory.
1145   * @return The mini HBase cluster created.
1146   * @see #shutdownMiniHBaseCluster()
1147   * @deprecated since 2.2.0 and will be removed in 4.0.0. Use
1148   *             {@link #startMiniHBaseCluster(StartMiniClusterOption)} instead.
1149   * @see #startMiniHBaseCluster(StartMiniClusterOption)
1150   * @see <a href="https://issues.apache.org/jira/browse/HBASE-21071">HBASE-21071</a>
1151   */
1152  @Deprecated
1153  public MiniHBaseCluster startMiniHBaseCluster(int numMasters, int numRegionServers,
1154    List<Integer> rsPorts, Class<? extends HMaster> masterClass,
1155    Class<? extends MiniHBaseCluster.MiniHBaseClusterRegionServer> rsClass, boolean createRootDir,
1156    boolean createWALDir) throws IOException, InterruptedException {
1157    StartMiniClusterOption option = StartMiniClusterOption.builder().numMasters(numMasters)
1158      .masterClass(masterClass).numRegionServers(numRegionServers).rsClass(rsClass).rsPorts(rsPorts)
1159      .createRootDir(createRootDir).createWALDir(createWALDir).build();
1160    return startMiniHBaseCluster(option);
1161  }
1162
1163  /**
1164   * Starts the hbase cluster up again after shutting it down previously in a test. Use this if you
1165   * want to keep dfs/zk up and just stop/start hbase.
1166   * @param servers number of region servers
1167   */
1168  public void restartHBaseCluster(int servers) throws IOException, InterruptedException {
1169    this.restartHBaseCluster(servers, null);
1170  }
1171
1172  public void restartHBaseCluster(int servers, List<Integer> ports)
1173    throws IOException, InterruptedException {
1174    StartMiniClusterOption option =
1175      StartMiniClusterOption.builder().numRegionServers(servers).rsPorts(ports).build();
1176    restartHBaseCluster(option);
1177    invalidateConnection();
1178  }
1179
1180  public void restartHBaseCluster(StartMiniClusterOption option)
1181    throws IOException, InterruptedException {
1182    closeConnection();
1183    this.hbaseCluster = new MiniHBaseCluster(this.conf, option.getNumMasters(),
1184      option.getNumAlwaysStandByMasters(), option.getNumRegionServers(), option.getRsPorts(),
1185      option.getMasterClass(), option.getRsClass());
1186    // Don't leave here till we've done a successful scan of the hbase:meta
1187    Connection conn = ConnectionFactory.createConnection(this.conf);
1188    Table t = conn.getTable(TableName.META_TABLE_NAME);
1189    ResultScanner s = t.getScanner(new Scan());
1190    while (s.next() != null) {
1191      // do nothing
1192    }
1193    LOG.info("HBase has been restarted");
1194    s.close();
1195    t.close();
1196    conn.close();
1197  }
1198
1199  /**
1200   * @return Current mini hbase cluster. Only has something in it after a call to
1201   *         {@link #startMiniCluster()}.
1202   * @see #startMiniCluster()
1203   */
1204  public MiniHBaseCluster getMiniHBaseCluster() {
1205    if (this.hbaseCluster == null || this.hbaseCluster instanceof MiniHBaseCluster) {
1206      return (MiniHBaseCluster) this.hbaseCluster;
1207    }
1208    throw new RuntimeException(
1209      hbaseCluster + " not an instance of " + MiniHBaseCluster.class.getName());
1210  }
1211
1212  /**
1213   * Stops mini hbase, zk, and hdfs clusters.
1214   * @see #startMiniCluster(int)
1215   */
1216  public void shutdownMiniCluster() throws IOException {
1217    LOG.info("Shutting down minicluster");
1218    shutdownMiniHBaseCluster();
1219    shutdownMiniDFSCluster();
1220    shutdownMiniZKCluster();
1221
1222    cleanupTestDir();
1223    miniClusterRunning = false;
1224    LOG.info("Minicluster is down");
1225  }
1226
1227  /**
1228   * Shutdown HBase mini cluster.Does not shutdown zk or dfs if running.
1229   * @throws java.io.IOException in case command is unsuccessful
1230   */
1231  public void shutdownMiniHBaseCluster() throws IOException {
1232    cleanup();
1233    if (this.hbaseCluster != null) {
1234      this.hbaseCluster.shutdown();
1235      // Wait till hbase is down before going on to shutdown zk.
1236      this.hbaseCluster.waitUntilShutDown();
1237      this.hbaseCluster = null;
1238    }
1239    if (zooKeeperWatcher != null) {
1240      zooKeeperWatcher.close();
1241      zooKeeperWatcher = null;
1242    }
1243  }
1244
1245  /**
1246   * Abruptly Shutdown HBase mini cluster. Does not shutdown zk or dfs if running.
1247   * @throws java.io.IOException throws in case command is unsuccessful
1248   */
1249  public void killMiniHBaseCluster() throws IOException {
1250    cleanup();
1251    if (this.hbaseCluster != null) {
1252      getMiniHBaseCluster().killAll();
1253      this.hbaseCluster = null;
1254    }
1255    if (zooKeeperWatcher != null) {
1256      zooKeeperWatcher.close();
1257      zooKeeperWatcher = null;
1258    }
1259  }
1260
1261  // close hbase admin, close current connection and reset MIN MAX configs for RS.
1262  private void cleanup() throws IOException {
1263    closeConnection();
1264    // unset the configuration for MIN and MAX RS to start
1265    conf.setInt(ServerManager.WAIT_ON_REGIONSERVERS_MINTOSTART, -1);
1266    conf.setInt(ServerManager.WAIT_ON_REGIONSERVERS_MAXTOSTART, -1);
1267  }
1268
1269  /**
1270   * Returns the path to the default root dir the minicluster uses. If <code>create</code> is true,
1271   * a new root directory path is fetched irrespective of whether it has been fetched before or not.
1272   * If false, previous path is used. Note: this does not cause the root dir to be created.
1273   * @return Fully qualified path for the default hbase root dir n
1274   */
1275  public Path getDefaultRootDirPath(boolean create) throws IOException {
1276    if (!create) {
1277      return getDataTestDirOnTestFS();
1278    } else {
1279      return getNewDataTestDirOnTestFS();
1280    }
1281  }
1282
1283  /**
1284   * Same as {{@link HBaseTestingUtility#getDefaultRootDirPath(boolean create)} except that
1285   * <code>create</code> flag is false. Note: this does not cause the root dir to be created.
1286   * @return Fully qualified path for the default hbase root dir n
1287   */
1288  public Path getDefaultRootDirPath() throws IOException {
1289    return getDefaultRootDirPath(false);
1290  }
1291
1292  /**
1293   * Creates an hbase rootdir in user home directory. Also creates hbase version file. Normally you
1294   * won't make use of this method. Root hbasedir is created for you as part of mini cluster
1295   * startup. You'd only use this method if you were doing manual operation.
1296   * @param create This flag decides whether to get a new root or data directory path or not, if it
1297   *               has been fetched already. Note : Directory will be made irrespective of whether
1298   *               path has been fetched or not. If directory already exists, it will be overwritten
1299   * @return Fully qualified path to hbase root dir n
1300   */
1301  public Path createRootDir(boolean create) throws IOException {
1302    FileSystem fs = FileSystem.get(this.conf);
1303    Path hbaseRootdir = getDefaultRootDirPath(create);
1304    CommonFSUtils.setRootDir(this.conf, hbaseRootdir);
1305    fs.mkdirs(hbaseRootdir);
1306    FSUtils.setVersion(fs, hbaseRootdir);
1307    return hbaseRootdir;
1308  }
1309
1310  /**
1311   * Same as {@link HBaseTestingUtility#createRootDir(boolean create)} except that
1312   * <code>create</code> flag is false.
1313   * @return Fully qualified path to hbase root dir n
1314   */
1315  public Path createRootDir() throws IOException {
1316    return createRootDir(false);
1317  }
1318
1319  /**
1320   * Creates a hbase walDir in the user's home directory. Normally you won't make use of this
1321   * method. Root hbaseWALDir is created for you as part of mini cluster startup. You'd only use
1322   * this method if you were doing manual operation.
1323   * @return Fully qualified path to hbase root dir n
1324   */
1325  public Path createWALRootDir() throws IOException {
1326    FileSystem fs = FileSystem.get(this.conf);
1327    Path walDir = getNewDataTestDirOnTestFS();
1328    CommonFSUtils.setWALRootDir(this.conf, walDir);
1329    fs.mkdirs(walDir);
1330    return walDir;
1331  }
1332
1333  private void setHBaseFsTmpDir() throws IOException {
1334    String hbaseFsTmpDirInString = this.conf.get("hbase.fs.tmp.dir");
1335    if (hbaseFsTmpDirInString == null) {
1336      this.conf.set("hbase.fs.tmp.dir", getDataTestDirOnTestFS("hbase-staging").toString());
1337      LOG.info("Setting hbase.fs.tmp.dir to " + this.conf.get("hbase.fs.tmp.dir"));
1338    } else {
1339      LOG.info("The hbase.fs.tmp.dir is set to " + hbaseFsTmpDirInString);
1340    }
1341  }
1342
1343  /**
1344   * Flushes all caches in the mini hbase cluster
1345   */
1346  public void flush() throws IOException {
1347    getMiniHBaseCluster().flushcache();
1348  }
1349
1350  /**
1351   * Flushes all caches in the mini hbase cluster
1352   */
1353  public void flush(TableName tableName) throws IOException {
1354    getMiniHBaseCluster().flushcache(tableName);
1355  }
1356
1357  /**
1358   * Compact all regions in the mini hbase cluster
1359   */
1360  public void compact(boolean major) throws IOException {
1361    getMiniHBaseCluster().compact(major);
1362  }
1363
1364  /**
1365   * Compact all of a table's reagion in the mini hbase cluster
1366   */
1367  public void compact(TableName tableName, boolean major) throws IOException {
1368    getMiniHBaseCluster().compact(tableName, major);
1369  }
1370
1371  /**
1372   * Create a table. nn * @return A Table instance for the created table. n
1373   */
1374  public Table createTable(TableName tableName, String family) throws IOException {
1375    return createTable(tableName, new String[] { family });
1376  }
1377
1378  /**
1379   * Create a table. nn * @return A Table instance for the created table. n
1380   */
1381  public Table createTable(TableName tableName, String[] families) throws IOException {
1382    List<byte[]> fams = new ArrayList<>(families.length);
1383    for (String family : families) {
1384      fams.add(Bytes.toBytes(family));
1385    }
1386    return createTable(tableName, fams.toArray(new byte[0][]));
1387  }
1388
1389  /**
1390   * Create a table. nn * @return A Table instance for the created table. n
1391   */
1392  public Table createTable(TableName tableName, byte[] family) throws IOException {
1393    return createTable(tableName, new byte[][] { family });
1394  }
1395
1396  /**
1397   * Create a table with multiple regions. nnn * @return A Table instance for the created table. n
1398   */
1399  public Table createMultiRegionTable(TableName tableName, byte[] family, int numRegions)
1400    throws IOException {
1401    if (numRegions < 3) throw new IOException("Must create at least 3 regions");
1402    byte[] startKey = Bytes.toBytes("aaaaa");
1403    byte[] endKey = Bytes.toBytes("zzzzz");
1404    byte[][] splitKeys = Bytes.split(startKey, endKey, numRegions - 3);
1405
1406    return createTable(tableName, new byte[][] { family }, splitKeys);
1407  }
1408
1409  /**
1410   * Create a table. nn * @return A Table instance for the created table. n
1411   */
1412  public Table createTable(TableName tableName, byte[][] families) throws IOException {
1413    return createTable(tableName, families, (byte[][]) null);
1414  }
1415
1416  /**
1417   * Create a table with multiple regions. nn * @return A Table instance for the created table. n
1418   */
1419  public Table createMultiRegionTable(TableName tableName, byte[][] families) throws IOException {
1420    return createTable(tableName, families, KEYS_FOR_HBA_CREATE_TABLE);
1421  }
1422
1423  /**
1424   * Create a table with multiple regions. n * @param replicaCount replica count. n * @return A
1425   * Table instance for the created table. n
1426   */
1427  public Table createMultiRegionTable(TableName tableName, int replicaCount, byte[][] families)
1428    throws IOException {
1429    return createTable(tableName, families, KEYS_FOR_HBA_CREATE_TABLE, replicaCount);
1430  }
1431
1432  /**
1433   * Create a table. nnn * @return A Table instance for the created table. n
1434   */
1435  public Table createTable(TableName tableName, byte[][] families, byte[][] splitKeys)
1436    throws IOException {
1437    return createTable(tableName, families, splitKeys, 1, new Configuration(getConfiguration()));
1438  }
1439
1440  /**
1441   * Create a table.
1442   * @param tableName    the table name
1443   * @param families     the families
1444   * @param splitKeys    the splitkeys
1445   * @param replicaCount the region replica count
1446   * @return A Table instance for the created table.
1447   * @throws IOException throws IOException
1448   */
1449  public Table createTable(TableName tableName, byte[][] families, byte[][] splitKeys,
1450    int replicaCount) throws IOException {
1451    return createTable(tableName, families, splitKeys, replicaCount,
1452      new Configuration(getConfiguration()));
1453  }
1454
1455  public Table createTable(TableName tableName, byte[][] families, int numVersions, byte[] startKey,
1456    byte[] endKey, int numRegions) throws IOException {
1457    TableDescriptor desc = createTableDescriptor(tableName, families, numVersions);
1458
1459    getAdmin().createTable(desc, startKey, endKey, numRegions);
1460    // HBaseAdmin only waits for regions to appear in hbase:meta we
1461    // should wait until they are assigned
1462    waitUntilAllRegionsAssigned(tableName);
1463    return getConnection().getTable(tableName);
1464  }
1465
1466  /**
1467   * Create a table.
1468   * @param c Configuration to use
1469   * @return A Table instance for the created table.
1470   */
1471  public Table createTable(TableDescriptor htd, byte[][] families, Configuration c)
1472    throws IOException {
1473    return createTable(htd, families, null, c);
1474  }
1475
1476  /**
1477   * Create a table.
1478   * @param htd       table descriptor
1479   * @param families  array of column families
1480   * @param splitKeys array of split keys
1481   * @param c         Configuration to use
1482   * @return A Table instance for the created table.
1483   * @throws IOException if getAdmin or createTable fails
1484   */
1485  public Table createTable(TableDescriptor htd, byte[][] families, byte[][] splitKeys,
1486    Configuration c) throws IOException {
1487    // Disable blooms (they are on by default as of 0.95) but we disable them here because
1488    // tests have hard coded counts of what to expect in block cache, etc., and blooms being
1489    // on is interfering.
1490    return createTable(htd, families, splitKeys, BloomType.NONE, HConstants.DEFAULT_BLOCKSIZE, c);
1491  }
1492
1493  /**
1494   * Create a table.
1495   * @param htd       table descriptor
1496   * @param families  array of column families
1497   * @param splitKeys array of split keys
1498   * @param type      Bloom type
1499   * @param blockSize block size
1500   * @param c         Configuration to use
1501   * @return A Table instance for the created table.
1502   * @throws IOException if getAdmin or createTable fails
1503   */
1504
1505  public Table createTable(TableDescriptor htd, byte[][] families, byte[][] splitKeys,
1506    BloomType type, int blockSize, Configuration c) throws IOException {
1507    TableDescriptorBuilder builder = TableDescriptorBuilder.newBuilder(htd);
1508    for (byte[] family : families) {
1509      ColumnFamilyDescriptorBuilder cfdb = ColumnFamilyDescriptorBuilder.newBuilder(family)
1510        .setBloomFilterType(type).setBlocksize(blockSize);
1511      if (isNewVersionBehaviorEnabled()) {
1512        cfdb.setNewVersionBehavior(true);
1513      }
1514      builder.setColumnFamily(cfdb.build());
1515    }
1516    TableDescriptor td = builder.build();
1517    if (splitKeys != null) {
1518      getAdmin().createTable(td, splitKeys);
1519    } else {
1520      getAdmin().createTable(td);
1521    }
1522    // HBaseAdmin only waits for regions to appear in hbase:meta
1523    // we should wait until they are assigned
1524    waitUntilAllRegionsAssigned(td.getTableName());
1525    return getConnection().getTable(td.getTableName());
1526  }
1527
1528  /**
1529   * Create a table.
1530   * @param htd       table descriptor
1531   * @param splitRows array of split keys
1532   * @return A Table instance for the created table. n
1533   */
1534  public Table createTable(TableDescriptor htd, byte[][] splitRows) throws IOException {
1535    TableDescriptorBuilder builder = TableDescriptorBuilder.newBuilder(htd);
1536    if (isNewVersionBehaviorEnabled()) {
1537      for (ColumnFamilyDescriptor family : htd.getColumnFamilies()) {
1538        builder.setColumnFamily(
1539          ColumnFamilyDescriptorBuilder.newBuilder(family).setNewVersionBehavior(true).build());
1540      }
1541    }
1542    if (splitRows != null) {
1543      getAdmin().createTable(builder.build(), splitRows);
1544    } else {
1545      getAdmin().createTable(builder.build());
1546    }
1547    // HBaseAdmin only waits for regions to appear in hbase:meta
1548    // we should wait until they are assigned
1549    waitUntilAllRegionsAssigned(htd.getTableName());
1550    return getConnection().getTable(htd.getTableName());
1551  }
1552
1553  /**
1554   * Create a table.
1555   * @param tableName    the table name
1556   * @param families     the families
1557   * @param splitKeys    the split keys
1558   * @param replicaCount the replica count
1559   * @param c            Configuration to use
1560   * @return A Table instance for the created table.
1561   */
1562  public Table createTable(TableName tableName, byte[][] families, byte[][] splitKeys,
1563    int replicaCount, final Configuration c) throws IOException {
1564    TableDescriptor htd =
1565      TableDescriptorBuilder.newBuilder(tableName).setRegionReplication(replicaCount).build();
1566    return createTable(htd, families, splitKeys, c);
1567  }
1568
1569  /**
1570   * Create a table.
1571   * @return A Table instance for the created table.
1572   */
1573  public Table createTable(TableName tableName, byte[] family, int numVersions) throws IOException {
1574    return createTable(tableName, new byte[][] { family }, numVersions);
1575  }
1576
1577  /**
1578   * Create a table.
1579   * @return A Table instance for the created table.
1580   */
1581  public Table createTable(TableName tableName, byte[][] families, int numVersions)
1582    throws IOException {
1583    return createTable(tableName, families, numVersions, (byte[][]) null);
1584  }
1585
1586  /**
1587   * Create a table.
1588   * @return A Table instance for the created table.
1589   */
1590  public Table createTable(TableName tableName, byte[][] families, int numVersions,
1591    byte[][] splitKeys) throws IOException {
1592    TableDescriptorBuilder builder = TableDescriptorBuilder.newBuilder(tableName);
1593    for (byte[] family : families) {
1594      ColumnFamilyDescriptorBuilder cfBuilder =
1595        ColumnFamilyDescriptorBuilder.newBuilder(family).setMaxVersions(numVersions);
1596      if (isNewVersionBehaviorEnabled()) {
1597        cfBuilder.setNewVersionBehavior(true);
1598      }
1599      builder.setColumnFamily(cfBuilder.build());
1600    }
1601    if (splitKeys != null) {
1602      getAdmin().createTable(builder.build(), splitKeys);
1603    } else {
1604      getAdmin().createTable(builder.build());
1605    }
1606    // HBaseAdmin only waits for regions to appear in hbase:meta we should wait until they are
1607    // assigned
1608    waitUntilAllRegionsAssigned(tableName);
1609    return getConnection().getTable(tableName);
1610  }
1611
1612  /**
1613   * Create a table with multiple regions.
1614   * @return A Table instance for the created table.
1615   */
1616  public Table createMultiRegionTable(TableName tableName, byte[][] families, int numVersions)
1617    throws IOException {
1618    return createTable(tableName, families, numVersions, KEYS_FOR_HBA_CREATE_TABLE);
1619  }
1620
1621  /**
1622   * Create a table.
1623   * @return A Table instance for the created table.
1624   */
1625  public Table createTable(TableName tableName, byte[][] families, int numVersions, int blockSize)
1626    throws IOException {
1627    TableDescriptorBuilder builder = TableDescriptorBuilder.newBuilder(tableName);
1628    for (byte[] family : families) {
1629      ColumnFamilyDescriptorBuilder cfBuilder = ColumnFamilyDescriptorBuilder.newBuilder(family)
1630        .setMaxVersions(numVersions).setBlocksize(blockSize);
1631      if (isNewVersionBehaviorEnabled()) {
1632        cfBuilder.setNewVersionBehavior(true);
1633      }
1634      builder.setColumnFamily(cfBuilder.build());
1635    }
1636    getAdmin().createTable(builder.build());
1637    // HBaseAdmin only waits for regions to appear in hbase:meta we should wait until they are
1638    // assigned
1639    waitUntilAllRegionsAssigned(tableName);
1640    return getConnection().getTable(tableName);
1641  }
1642
1643  public Table createTable(TableName tableName, byte[][] families, int numVersions, int blockSize,
1644    String cpName) throws IOException {
1645    TableDescriptorBuilder builder = TableDescriptorBuilder.newBuilder(tableName);
1646    for (byte[] family : families) {
1647      ColumnFamilyDescriptorBuilder cfBuilder = ColumnFamilyDescriptorBuilder.newBuilder(family)
1648        .setMaxVersions(numVersions).setBlocksize(blockSize);
1649      if (isNewVersionBehaviorEnabled()) {
1650        cfBuilder.setNewVersionBehavior(true);
1651      }
1652      builder.setColumnFamily(cfBuilder.build());
1653    }
1654    if (cpName != null) {
1655      builder.setCoprocessor(cpName);
1656    }
1657    getAdmin().createTable(builder.build());
1658    // HBaseAdmin only waits for regions to appear in hbase:meta we should wait until they are
1659    // assigned
1660    waitUntilAllRegionsAssigned(tableName);
1661    return getConnection().getTable(tableName);
1662  }
1663
1664  /**
1665   * Create a table.
1666   * @return A Table instance for the created table.
1667   */
1668  public Table createTable(TableName tableName, byte[][] families, int[] numVersions)
1669    throws IOException {
1670    TableDescriptorBuilder builder = TableDescriptorBuilder.newBuilder(tableName);
1671    int i = 0;
1672    for (byte[] family : families) {
1673      ColumnFamilyDescriptorBuilder cfBuilder =
1674        ColumnFamilyDescriptorBuilder.newBuilder(family).setMaxVersions(numVersions[i]);
1675      if (isNewVersionBehaviorEnabled()) {
1676        cfBuilder.setNewVersionBehavior(true);
1677      }
1678      builder.setColumnFamily(cfBuilder.build());
1679      i++;
1680    }
1681    getAdmin().createTable(builder.build());
1682    // HBaseAdmin only waits for regions to appear in hbase:meta we should wait until they are
1683    // assigned
1684    waitUntilAllRegionsAssigned(tableName);
1685    return getConnection().getTable(tableName);
1686  }
1687
1688  /**
1689   * Create a table.
1690   * @return A Table instance for the created table.
1691   */
1692  public Table createTable(TableName tableName, byte[] family, byte[][] splitRows)
1693    throws IOException {
1694    TableDescriptorBuilder builder = TableDescriptorBuilder.newBuilder(tableName);
1695    ColumnFamilyDescriptorBuilder cfBuilder = ColumnFamilyDescriptorBuilder.newBuilder(family);
1696    if (isNewVersionBehaviorEnabled()) {
1697      cfBuilder.setNewVersionBehavior(true);
1698    }
1699    builder.setColumnFamily(cfBuilder.build());
1700    getAdmin().createTable(builder.build(), splitRows);
1701    // HBaseAdmin only waits for regions to appear in hbase:meta we should wait until they are
1702    // assigned
1703    waitUntilAllRegionsAssigned(tableName);
1704    return getConnection().getTable(tableName);
1705  }
1706
1707  /**
1708   * Create a table with multiple regions.
1709   * @return A Table instance for the created table.
1710   */
1711  public Table createMultiRegionTable(TableName tableName, byte[] family) throws IOException {
1712    return createTable(tableName, family, KEYS_FOR_HBA_CREATE_TABLE);
1713  }
1714
1715  /**
1716   * Modify a table, synchronous.
1717   * @deprecated since 3.0.0 and will be removed in 4.0.0. Just use
1718   *             {@link Admin#modifyTable(TableDescriptor)} directly as it is synchronous now.
1719   * @see Admin#modifyTable(TableDescriptor)
1720   * @see <a href="https://issues.apache.org/jira/browse/HBASE-22002">HBASE-22002</a>
1721   */
1722  @Deprecated
1723  public static void modifyTableSync(Admin admin, TableDescriptor desc)
1724    throws IOException, InterruptedException {
1725    admin.modifyTable(desc);
1726  }
1727
1728  /**
1729   * Set the number of Region replicas.
1730   */
1731  public static void setReplicas(Admin admin, TableName table, int replicaCount)
1732    throws IOException, InterruptedException {
1733    TableDescriptor desc = TableDescriptorBuilder.newBuilder(admin.getDescriptor(table))
1734      .setRegionReplication(replicaCount).build();
1735    admin.modifyTable(desc);
1736  }
1737
1738  /**
1739   * Drop an existing table
1740   * @param tableName existing table
1741   */
1742  public void deleteTable(TableName tableName) throws IOException {
1743    try {
1744      getAdmin().disableTable(tableName);
1745    } catch (TableNotEnabledException e) {
1746      LOG.debug("Table: " + tableName + " already disabled, so just deleting it.");
1747    }
1748    getAdmin().deleteTable(tableName);
1749  }
1750
1751  /**
1752   * Drop an existing table
1753   * @param tableName existing table
1754   */
1755  public void deleteTableIfAny(TableName tableName) throws IOException {
1756    try {
1757      deleteTable(tableName);
1758    } catch (TableNotFoundException e) {
1759      // ignore
1760    }
1761  }
1762
1763  // ==========================================================================
1764  // Canned table and table descriptor creation
1765
1766  public final static byte[] fam1 = Bytes.toBytes("colfamily11");
1767  public final static byte[] fam2 = Bytes.toBytes("colfamily21");
1768  public final static byte[] fam3 = Bytes.toBytes("colfamily31");
1769  public static final byte[][] COLUMNS = { fam1, fam2, fam3 };
1770  private static final int MAXVERSIONS = 3;
1771
1772  public static final char FIRST_CHAR = 'a';
1773  public static final char LAST_CHAR = 'z';
1774  public static final byte[] START_KEY_BYTES = { FIRST_CHAR, FIRST_CHAR, FIRST_CHAR };
1775  public static final String START_KEY = new String(START_KEY_BYTES, HConstants.UTF8_CHARSET);
1776
1777  public TableDescriptorBuilder createModifyableTableDescriptor(final String name) {
1778    return createModifyableTableDescriptor(TableName.valueOf(name),
1779      ColumnFamilyDescriptorBuilder.DEFAULT_MIN_VERSIONS, MAXVERSIONS, HConstants.FOREVER,
1780      ColumnFamilyDescriptorBuilder.DEFAULT_KEEP_DELETED);
1781  }
1782
1783  public TableDescriptor createTableDescriptor(final TableName name, final int minVersions,
1784    final int versions, final int ttl, KeepDeletedCells keepDeleted) {
1785    TableDescriptorBuilder builder = TableDescriptorBuilder.newBuilder(name);
1786    for (byte[] cfName : new byte[][] { fam1, fam2, fam3 }) {
1787      ColumnFamilyDescriptorBuilder cfBuilder = ColumnFamilyDescriptorBuilder.newBuilder(cfName)
1788        .setMinVersions(minVersions).setMaxVersions(versions).setKeepDeletedCells(keepDeleted)
1789        .setBlockCacheEnabled(false).setTimeToLive(ttl);
1790      if (isNewVersionBehaviorEnabled()) {
1791        cfBuilder.setNewVersionBehavior(true);
1792      }
1793      builder.setColumnFamily(cfBuilder.build());
1794    }
1795    return builder.build();
1796  }
1797
1798  public TableDescriptorBuilder createModifyableTableDescriptor(final TableName name,
1799    final int minVersions, final int versions, final int ttl, KeepDeletedCells keepDeleted) {
1800    TableDescriptorBuilder builder = TableDescriptorBuilder.newBuilder(name);
1801    for (byte[] cfName : new byte[][] { fam1, fam2, fam3 }) {
1802      ColumnFamilyDescriptorBuilder cfBuilder = ColumnFamilyDescriptorBuilder.newBuilder(cfName)
1803        .setMinVersions(minVersions).setMaxVersions(versions).setKeepDeletedCells(keepDeleted)
1804        .setBlockCacheEnabled(false).setTimeToLive(ttl);
1805      if (isNewVersionBehaviorEnabled()) {
1806        cfBuilder.setNewVersionBehavior(true);
1807      }
1808      builder.setColumnFamily(cfBuilder.build());
1809    }
1810    return builder;
1811  }
1812
1813  /**
1814   * Create a table of name <code>name</code>.
1815   * @param name Name to give table.
1816   * @return Column descriptor.
1817   */
1818  public TableDescriptor createTableDescriptor(final TableName name) {
1819    return createTableDescriptor(name, ColumnFamilyDescriptorBuilder.DEFAULT_MIN_VERSIONS,
1820      MAXVERSIONS, HConstants.FOREVER, ColumnFamilyDescriptorBuilder.DEFAULT_KEEP_DELETED);
1821  }
1822
1823  public TableDescriptor createTableDescriptor(final TableName tableName, byte[] family) {
1824    return createTableDescriptor(tableName, new byte[][] { family }, 1);
1825  }
1826
1827  public TableDescriptor createTableDescriptor(final TableName tableName, byte[][] families,
1828    int maxVersions) {
1829    TableDescriptorBuilder builder = TableDescriptorBuilder.newBuilder(tableName);
1830    for (byte[] family : families) {
1831      ColumnFamilyDescriptorBuilder cfBuilder =
1832        ColumnFamilyDescriptorBuilder.newBuilder(family).setMaxVersions(maxVersions);
1833      if (isNewVersionBehaviorEnabled()) {
1834        cfBuilder.setNewVersionBehavior(true);
1835      }
1836      builder.setColumnFamily(cfBuilder.build());
1837    }
1838    return builder.build();
1839  }
1840
1841  /**
1842   * Create an HRegion that writes to the local tmp dirs
1843   * @param desc     a table descriptor indicating which table the region belongs to
1844   * @param startKey the start boundary of the region
1845   * @param endKey   the end boundary of the region
1846   * @return a region that writes to local dir for testing
1847   */
1848  public HRegion createLocalHRegion(TableDescriptor desc, byte[] startKey, byte[] endKey)
1849    throws IOException {
1850    RegionInfo hri = RegionInfoBuilder.newBuilder(desc.getTableName()).setStartKey(startKey)
1851      .setEndKey(endKey).build();
1852    return createLocalHRegion(hri, desc);
1853  }
1854
1855  /**
1856   * Create an HRegion that writes to the local tmp dirs. Creates the WAL for you. Be sure to call
1857   * {@link HBaseTestingUtility#closeRegionAndWAL(HRegion)} when you're finished with it.
1858   */
1859  public HRegion createLocalHRegion(RegionInfo info, TableDescriptor desc) throws IOException {
1860    return createRegionAndWAL(info, getDataTestDir(), getConfiguration(), desc);
1861  }
1862
1863  /**
1864   * Create an HRegion that writes to the local tmp dirs with specified wal
1865   * @param info regioninfo
1866   * @param conf configuration
1867   * @param desc table descriptor
1868   * @param wal  wal for this region.
1869   * @return created hregion n
1870   */
1871  public HRegion createLocalHRegion(RegionInfo info, Configuration conf, TableDescriptor desc,
1872    WAL wal) throws IOException {
1873    return HRegion.createHRegion(info, getDataTestDir(), conf, desc, wal);
1874  }
1875
1876  /**
1877   * nnnnn * @return A region on which you must call
1878   * {@link HBaseTestingUtility#closeRegionAndWAL(HRegion)} when done. n
1879   */
1880  public HRegion createLocalHRegion(TableName tableName, byte[] startKey, byte[] stopKey,
1881    Configuration conf, boolean isReadOnly, Durability durability, WAL wal, byte[]... families)
1882    throws IOException {
1883    return createLocalHRegionWithInMemoryFlags(tableName, startKey, stopKey, conf, isReadOnly,
1884      durability, wal, null, families);
1885  }
1886
1887  public HRegion createLocalHRegionWithInMemoryFlags(TableName tableName, byte[] startKey,
1888    byte[] stopKey, Configuration conf, boolean isReadOnly, Durability durability, WAL wal,
1889    boolean[] compactedMemStore, byte[]... families) throws IOException {
1890    TableDescriptorBuilder builder = TableDescriptorBuilder.newBuilder(tableName);
1891    builder.setReadOnly(isReadOnly);
1892    int i = 0;
1893    for (byte[] family : families) {
1894      ColumnFamilyDescriptorBuilder cfBuilder = ColumnFamilyDescriptorBuilder.newBuilder(family);
1895      if (compactedMemStore != null && i < compactedMemStore.length) {
1896        cfBuilder.setInMemoryCompaction(MemoryCompactionPolicy.BASIC);
1897      } else {
1898        cfBuilder.setInMemoryCompaction(MemoryCompactionPolicy.NONE);
1899
1900      }
1901      i++;
1902      // Set default to be three versions.
1903      cfBuilder.setMaxVersions(Integer.MAX_VALUE);
1904      builder.setColumnFamily(cfBuilder.build());
1905    }
1906    builder.setDurability(durability);
1907    RegionInfo info =
1908      RegionInfoBuilder.newBuilder(tableName).setStartKey(startKey).setEndKey(stopKey).build();
1909    return createLocalHRegion(info, conf, builder.build(), wal);
1910  }
1911
1912  //
1913  // ==========================================================================
1914
1915  /**
1916   * Provide an existing table name to truncate. Scans the table and issues a delete for each row
1917   * read.
1918   * @param tableName existing table
1919   * @return HTable to that new table n
1920   */
1921  public Table deleteTableData(TableName tableName) throws IOException {
1922    Table table = getConnection().getTable(tableName);
1923    Scan scan = new Scan();
1924    ResultScanner resScan = table.getScanner(scan);
1925    for (Result res : resScan) {
1926      Delete del = new Delete(res.getRow());
1927      table.delete(del);
1928    }
1929    resScan = table.getScanner(scan);
1930    resScan.close();
1931    return table;
1932  }
1933
1934  /**
1935   * Truncate a table using the admin command. Effectively disables, deletes, and recreates the
1936   * table.
1937   * @param tableName       table which must exist.
1938   * @param preserveRegions keep the existing split points
1939   * @return HTable for the new table
1940   */
1941  public Table truncateTable(final TableName tableName, final boolean preserveRegions)
1942    throws IOException {
1943    Admin admin = getAdmin();
1944    if (!admin.isTableDisabled(tableName)) {
1945      admin.disableTable(tableName);
1946    }
1947    admin.truncateTable(tableName, preserveRegions);
1948    return getConnection().getTable(tableName);
1949  }
1950
1951  /**
1952   * Truncate a table using the admin command. Effectively disables, deletes, and recreates the
1953   * table. For previous behavior of issuing row deletes, see deleteTableData. Expressly does not
1954   * preserve regions of existing table.
1955   * @param tableName table which must exist.
1956   * @return HTable for the new table
1957   */
1958  public Table truncateTable(final TableName tableName) throws IOException {
1959    return truncateTable(tableName, false);
1960  }
1961
1962  /**
1963   * Load table with rows from 'aaa' to 'zzz'.
1964   * @param t Table
1965   * @param f Family
1966   * @return Count of rows loaded. n
1967   */
1968  public int loadTable(final Table t, final byte[] f) throws IOException {
1969    return loadTable(t, new byte[][] { f });
1970  }
1971
1972  /**
1973   * Load table with rows from 'aaa' to 'zzz'.
1974   * @param t Table
1975   * @param f Family
1976   * @return Count of rows loaded. n
1977   */
1978  public int loadTable(final Table t, final byte[] f, boolean writeToWAL) throws IOException {
1979    return loadTable(t, new byte[][] { f }, null, writeToWAL);
1980  }
1981
1982  /**
1983   * Load table of multiple column families with rows from 'aaa' to 'zzz'.
1984   * @param t Table
1985   * @param f Array of Families to load
1986   * @return Count of rows loaded. n
1987   */
1988  public int loadTable(final Table t, final byte[][] f) throws IOException {
1989    return loadTable(t, f, null);
1990  }
1991
1992  /**
1993   * Load table of multiple column families with rows from 'aaa' to 'zzz'.
1994   * @param t     Table
1995   * @param f     Array of Families to load
1996   * @param value the values of the cells. If null is passed, the row key is used as value
1997   * @return Count of rows loaded. n
1998   */
1999  public int loadTable(final Table t, final byte[][] f, byte[] value) throws IOException {
2000    return loadTable(t, f, value, true);
2001  }
2002
2003  /**
2004   * Load table of multiple column families with rows from 'aaa' to 'zzz'.
2005   * @param t     Table
2006   * @param f     Array of Families to load
2007   * @param value the values of the cells. If null is passed, the row key is used as value
2008   * @return Count of rows loaded. n
2009   */
2010  public int loadTable(final Table t, final byte[][] f, byte[] value, boolean writeToWAL)
2011    throws IOException {
2012    List<Put> puts = new ArrayList<>();
2013    for (byte[] row : HBaseTestingUtility.ROWS) {
2014      Put put = new Put(row);
2015      put.setDurability(writeToWAL ? Durability.USE_DEFAULT : Durability.SKIP_WAL);
2016      for (int i = 0; i < f.length; i++) {
2017        byte[] value1 = value != null ? value : row;
2018        put.addColumn(f[i], f[i], value1);
2019      }
2020      puts.add(put);
2021    }
2022    t.put(puts);
2023    return puts.size();
2024  }
2025
2026  /**
2027   * A tracker for tracking and validating table rows generated with
2028   * {@link HBaseTestingUtility#loadTable(Table, byte[])}
2029   */
2030  public static class SeenRowTracker {
2031    int dim = 'z' - 'a' + 1;
2032    int[][][] seenRows = new int[dim][dim][dim]; // count of how many times the row is seen
2033    byte[] startRow;
2034    byte[] stopRow;
2035
2036    public SeenRowTracker(byte[] startRow, byte[] stopRow) {
2037      this.startRow = startRow;
2038      this.stopRow = stopRow;
2039    }
2040
2041    void reset() {
2042      for (byte[] row : ROWS) {
2043        seenRows[i(row[0])][i(row[1])][i(row[2])] = 0;
2044      }
2045    }
2046
2047    int i(byte b) {
2048      return b - 'a';
2049    }
2050
2051    public void addRow(byte[] row) {
2052      seenRows[i(row[0])][i(row[1])][i(row[2])]++;
2053    }
2054
2055    /**
2056     * Validate that all the rows between startRow and stopRow are seen exactly once, and all other
2057     * rows none
2058     */
2059    public void validate() {
2060      for (byte b1 = 'a'; b1 <= 'z'; b1++) {
2061        for (byte b2 = 'a'; b2 <= 'z'; b2++) {
2062          for (byte b3 = 'a'; b3 <= 'z'; b3++) {
2063            int count = seenRows[i(b1)][i(b2)][i(b3)];
2064            int expectedCount = 0;
2065            if (
2066              Bytes.compareTo(new byte[] { b1, b2, b3 }, startRow) >= 0
2067                && Bytes.compareTo(new byte[] { b1, b2, b3 }, stopRow) < 0
2068            ) {
2069              expectedCount = 1;
2070            }
2071            if (count != expectedCount) {
2072              String row = new String(new byte[] { b1, b2, b3 }, StandardCharsets.UTF_8);
2073              throw new RuntimeException("Row:" + row + " has a seen count of " + count + " "
2074                + "instead of " + expectedCount);
2075            }
2076          }
2077        }
2078      }
2079    }
2080  }
2081
2082  public int loadRegion(final HRegion r, final byte[] f) throws IOException {
2083    return loadRegion(r, f, false);
2084  }
2085
2086  public int loadRegion(final Region r, final byte[] f) throws IOException {
2087    return loadRegion((HRegion) r, f);
2088  }
2089
2090  /**
2091   * Load region with rows from 'aaa' to 'zzz'.
2092   * @param r     Region
2093   * @param f     Family
2094   * @param flush flush the cache if true
2095   * @return Count of rows loaded. n
2096   */
2097  public int loadRegion(final HRegion r, final byte[] f, final boolean flush) throws IOException {
2098    byte[] k = new byte[3];
2099    int rowCount = 0;
2100    for (byte b1 = 'a'; b1 <= 'z'; b1++) {
2101      for (byte b2 = 'a'; b2 <= 'z'; b2++) {
2102        for (byte b3 = 'a'; b3 <= 'z'; b3++) {
2103          k[0] = b1;
2104          k[1] = b2;
2105          k[2] = b3;
2106          Put put = new Put(k);
2107          put.setDurability(Durability.SKIP_WAL);
2108          put.addColumn(f, null, k);
2109          if (r.getWAL() == null) {
2110            put.setDurability(Durability.SKIP_WAL);
2111          }
2112          int preRowCount = rowCount;
2113          int pause = 10;
2114          int maxPause = 1000;
2115          while (rowCount == preRowCount) {
2116            try {
2117              r.put(put);
2118              rowCount++;
2119            } catch (RegionTooBusyException e) {
2120              pause = (pause * 2 >= maxPause) ? maxPause : pause * 2;
2121              Threads.sleep(pause);
2122            }
2123          }
2124        }
2125      }
2126      if (flush) {
2127        r.flush(true);
2128      }
2129    }
2130    return rowCount;
2131  }
2132
2133  public void loadNumericRows(final Table t, final byte[] f, int startRow, int endRow)
2134    throws IOException {
2135    for (int i = startRow; i < endRow; i++) {
2136      byte[] data = Bytes.toBytes(String.valueOf(i));
2137      Put put = new Put(data);
2138      put.addColumn(f, null, data);
2139      t.put(put);
2140    }
2141  }
2142
2143  public void loadRandomRows(final Table t, final byte[] f, int rowSize, int totalRows)
2144    throws IOException {
2145    byte[] row = new byte[rowSize];
2146    for (int i = 0; i < totalRows; i++) {
2147      Bytes.random(row);
2148      Put put = new Put(row);
2149      put.addColumn(f, new byte[] { 0 }, new byte[] { 0 });
2150      t.put(put);
2151    }
2152  }
2153
2154  public void verifyNumericRows(Table table, final byte[] f, int startRow, int endRow,
2155    int replicaId) throws IOException {
2156    for (int i = startRow; i < endRow; i++) {
2157      String failMsg = "Failed verification of row :" + i;
2158      byte[] data = Bytes.toBytes(String.valueOf(i));
2159      Get get = new Get(data);
2160      get.setReplicaId(replicaId);
2161      get.setConsistency(Consistency.TIMELINE);
2162      Result result = table.get(get);
2163      if (!result.containsColumn(f, null)) {
2164        throw new AssertionError(failMsg);
2165      }
2166      assertEquals(failMsg, 1, result.getColumnCells(f, null).size());
2167      Cell cell = result.getColumnLatestCell(f, null);
2168      if (
2169        !Bytes.equals(data, 0, data.length, cell.getValueArray(), cell.getValueOffset(),
2170          cell.getValueLength())
2171      ) {
2172        throw new AssertionError(failMsg);
2173      }
2174    }
2175  }
2176
2177  public void verifyNumericRows(Region region, final byte[] f, int startRow, int endRow)
2178    throws IOException {
2179    verifyNumericRows((HRegion) region, f, startRow, endRow);
2180  }
2181
2182  public void verifyNumericRows(HRegion region, final byte[] f, int startRow, int endRow)
2183    throws IOException {
2184    verifyNumericRows(region, f, startRow, endRow, true);
2185  }
2186
2187  public void verifyNumericRows(Region region, final byte[] f, int startRow, int endRow,
2188    final boolean present) throws IOException {
2189    verifyNumericRows((HRegion) region, f, startRow, endRow, present);
2190  }
2191
2192  public void verifyNumericRows(HRegion region, final byte[] f, int startRow, int endRow,
2193    final boolean present) throws IOException {
2194    for (int i = startRow; i < endRow; i++) {
2195      String failMsg = "Failed verification of row :" + i;
2196      byte[] data = Bytes.toBytes(String.valueOf(i));
2197      Result result = region.get(new Get(data));
2198
2199      boolean hasResult = result != null && !result.isEmpty();
2200      if (present != hasResult) {
2201        throw new AssertionError(
2202          failMsg + result + " expected:<" + present + "> but was:<" + hasResult + ">");
2203      }
2204      if (!present) continue;
2205
2206      if (!result.containsColumn(f, null)) {
2207        throw new AssertionError(failMsg);
2208      }
2209      assertEquals(failMsg, 1, result.getColumnCells(f, null).size());
2210      Cell cell = result.getColumnLatestCell(f, null);
2211      if (
2212        !Bytes.equals(data, 0, data.length, cell.getValueArray(), cell.getValueOffset(),
2213          cell.getValueLength())
2214      ) {
2215        throw new AssertionError(failMsg);
2216      }
2217    }
2218  }
2219
2220  public void deleteNumericRows(final Table t, final byte[] f, int startRow, int endRow)
2221    throws IOException {
2222    for (int i = startRow; i < endRow; i++) {
2223      byte[] data = Bytes.toBytes(String.valueOf(i));
2224      Delete delete = new Delete(data);
2225      delete.addFamily(f);
2226      t.delete(delete);
2227    }
2228  }
2229
2230  /**
2231   * Return the number of rows in the given table.
2232   * @param table to count rows
2233   * @return count of rows
2234   */
2235  public static int countRows(final Table table) throws IOException {
2236    return countRows(table, new Scan());
2237  }
2238
2239  public static int countRows(final Table table, final Scan scan) throws IOException {
2240    try (ResultScanner results = table.getScanner(scan)) {
2241      int count = 0;
2242      while (results.next() != null) {
2243        count++;
2244      }
2245      return count;
2246    }
2247  }
2248
2249  public int countRows(final Table table, final byte[]... families) throws IOException {
2250    Scan scan = new Scan();
2251    for (byte[] family : families) {
2252      scan.addFamily(family);
2253    }
2254    return countRows(table, scan);
2255  }
2256
2257  /**
2258   * Return the number of rows in the given table.
2259   */
2260  public int countRows(final TableName tableName) throws IOException {
2261    Table table = getConnection().getTable(tableName);
2262    try {
2263      return countRows(table);
2264    } finally {
2265      table.close();
2266    }
2267  }
2268
2269  public int countRows(final Region region) throws IOException {
2270    return countRows(region, new Scan());
2271  }
2272
2273  public int countRows(final Region region, final Scan scan) throws IOException {
2274    InternalScanner scanner = region.getScanner(scan);
2275    try {
2276      return countRows(scanner);
2277    } finally {
2278      scanner.close();
2279    }
2280  }
2281
2282  public int countRows(final InternalScanner scanner) throws IOException {
2283    int scannedCount = 0;
2284    List<Cell> results = new ArrayList<>();
2285    boolean hasMore = true;
2286    while (hasMore) {
2287      hasMore = scanner.next(results);
2288      scannedCount += results.size();
2289      results.clear();
2290    }
2291    return scannedCount;
2292  }
2293
2294  /**
2295   * Return an md5 digest of the entire contents of a table.
2296   */
2297  public String checksumRows(final Table table) throws Exception {
2298
2299    Scan scan = new Scan();
2300    ResultScanner results = table.getScanner(scan);
2301    MessageDigest digest = MessageDigest.getInstance("MD5");
2302    for (Result res : results) {
2303      digest.update(res.getRow());
2304    }
2305    results.close();
2306    return digest.toString();
2307  }
2308
2309  /** All the row values for the data loaded by {@link #loadTable(Table, byte[])} */
2310  public static final byte[][] ROWS = new byte[(int) Math.pow('z' - 'a' + 1, 3)][3]; // ~52KB
2311  static {
2312    int i = 0;
2313    for (byte b1 = 'a'; b1 <= 'z'; b1++) {
2314      for (byte b2 = 'a'; b2 <= 'z'; b2++) {
2315        for (byte b3 = 'a'; b3 <= 'z'; b3++) {
2316          ROWS[i][0] = b1;
2317          ROWS[i][1] = b2;
2318          ROWS[i][2] = b3;
2319          i++;
2320        }
2321      }
2322    }
2323  }
2324
2325  public static final byte[][] KEYS = { HConstants.EMPTY_BYTE_ARRAY, Bytes.toBytes("bbb"),
2326    Bytes.toBytes("ccc"), Bytes.toBytes("ddd"), Bytes.toBytes("eee"), Bytes.toBytes("fff"),
2327    Bytes.toBytes("ggg"), Bytes.toBytes("hhh"), Bytes.toBytes("iii"), Bytes.toBytes("jjj"),
2328    Bytes.toBytes("kkk"), Bytes.toBytes("lll"), Bytes.toBytes("mmm"), Bytes.toBytes("nnn"),
2329    Bytes.toBytes("ooo"), Bytes.toBytes("ppp"), Bytes.toBytes("qqq"), Bytes.toBytes("rrr"),
2330    Bytes.toBytes("sss"), Bytes.toBytes("ttt"), Bytes.toBytes("uuu"), Bytes.toBytes("vvv"),
2331    Bytes.toBytes("www"), Bytes.toBytes("xxx"), Bytes.toBytes("yyy") };
2332
2333  public static final byte[][] KEYS_FOR_HBA_CREATE_TABLE = { Bytes.toBytes("bbb"),
2334    Bytes.toBytes("ccc"), Bytes.toBytes("ddd"), Bytes.toBytes("eee"), Bytes.toBytes("fff"),
2335    Bytes.toBytes("ggg"), Bytes.toBytes("hhh"), Bytes.toBytes("iii"), Bytes.toBytes("jjj"),
2336    Bytes.toBytes("kkk"), Bytes.toBytes("lll"), Bytes.toBytes("mmm"), Bytes.toBytes("nnn"),
2337    Bytes.toBytes("ooo"), Bytes.toBytes("ppp"), Bytes.toBytes("qqq"), Bytes.toBytes("rrr"),
2338    Bytes.toBytes("sss"), Bytes.toBytes("ttt"), Bytes.toBytes("uuu"), Bytes.toBytes("vvv"),
2339    Bytes.toBytes("www"), Bytes.toBytes("xxx"), Bytes.toBytes("yyy"), Bytes.toBytes("zzz") };
2340
2341  /**
2342   * Create rows in hbase:meta for regions of the specified table with the specified start keys. The
2343   * first startKey should be a 0 length byte array if you want to form a proper range of regions.
2344   * nnn * @return list of region info for regions added to meta n
2345   */
2346  public List<RegionInfo> createMultiRegionsInMeta(final Configuration conf,
2347    final TableDescriptor htd, byte[][] startKeys) throws IOException {
2348    Table meta = getConnection().getTable(TableName.META_TABLE_NAME);
2349    Arrays.sort(startKeys, Bytes.BYTES_COMPARATOR);
2350    List<RegionInfo> newRegions = new ArrayList<>(startKeys.length);
2351    MetaTableAccessor.updateTableState(getConnection(), htd.getTableName(),
2352      TableState.State.ENABLED);
2353    // add custom ones
2354    for (int i = 0; i < startKeys.length; i++) {
2355      int j = (i + 1) % startKeys.length;
2356      RegionInfo hri = RegionInfoBuilder.newBuilder(htd.getTableName()).setStartKey(startKeys[i])
2357        .setEndKey(startKeys[j]).build();
2358      MetaTableAccessor.addRegionsToMeta(getConnection(), Collections.singletonList(hri), 1);
2359      newRegions.add(hri);
2360    }
2361
2362    meta.close();
2363    return newRegions;
2364  }
2365
2366  /**
2367   * Create an unmanaged WAL. Be sure to close it when you're through.
2368   */
2369  public static WAL createWal(final Configuration conf, final Path rootDir, final RegionInfo hri)
2370    throws IOException {
2371    // The WAL subsystem will use the default rootDir rather than the passed in rootDir
2372    // unless I pass along via the conf.
2373    Configuration confForWAL = new Configuration(conf);
2374    confForWAL.set(HConstants.HBASE_DIR, rootDir.toString());
2375    return new WALFactory(confForWAL, "hregion-" + RandomStringUtils.randomNumeric(8)).getWAL(hri);
2376  }
2377
2378  /**
2379   * Create a region with it's own WAL. Be sure to call
2380   * {@link HBaseTestingUtility#closeRegionAndWAL(HRegion)} to clean up all resources.
2381   */
2382  public static HRegion createRegionAndWAL(final RegionInfo info, final Path rootDir,
2383    final Configuration conf, final TableDescriptor htd) throws IOException {
2384    return createRegionAndWAL(info, rootDir, conf, htd, true);
2385  }
2386
2387  /**
2388   * Create a region with it's own WAL. Be sure to call
2389   * {@link HBaseTestingUtility#closeRegionAndWAL(HRegion)} to clean up all resources.
2390   */
2391  public static HRegion createRegionAndWAL(final RegionInfo info, final Path rootDir,
2392    final Configuration conf, final TableDescriptor htd, BlockCache blockCache) throws IOException {
2393    HRegion region = createRegionAndWAL(info, rootDir, conf, htd, false);
2394    region.setBlockCache(blockCache);
2395    region.initialize();
2396    return region;
2397  }
2398
2399  /**
2400   * Create a region with it's own WAL. Be sure to call
2401   * {@link HBaseTestingUtility#closeRegionAndWAL(HRegion)} to clean up all resources.
2402   */
2403  public static HRegion createRegionAndWAL(final RegionInfo info, final Path rootDir,
2404    final Configuration conf, final TableDescriptor htd, MobFileCache mobFileCache)
2405    throws IOException {
2406    HRegion region = createRegionAndWAL(info, rootDir, conf, htd, false);
2407    region.setMobFileCache(mobFileCache);
2408    region.initialize();
2409    return region;
2410  }
2411
2412  /**
2413   * Create a region with it's own WAL. Be sure to call
2414   * {@link HBaseTestingUtility#closeRegionAndWAL(HRegion)} to clean up all resources.
2415   */
2416  public static HRegion createRegionAndWAL(final RegionInfo info, final Path rootDir,
2417    final Configuration conf, final TableDescriptor htd, boolean initialize) throws IOException {
2418    ChunkCreator.initialize(MemStoreLAB.CHUNK_SIZE_DEFAULT, false, 0, 0, 0, null,
2419      MemStoreLAB.INDEX_CHUNK_SIZE_PERCENTAGE_DEFAULT);
2420    WAL wal = createWal(conf, rootDir, info);
2421    return HRegion.createHRegion(info, rootDir, conf, htd, wal, initialize);
2422  }
2423
2424  /**
2425   * Returns all rows from the hbase:meta table.
2426   * @throws IOException When reading the rows fails.
2427   */
2428  public List<byte[]> getMetaTableRows() throws IOException {
2429    // TODO: Redo using MetaTableAccessor class
2430    Table t = getConnection().getTable(TableName.META_TABLE_NAME);
2431    List<byte[]> rows = new ArrayList<>();
2432    ResultScanner s = t.getScanner(new Scan());
2433    for (Result result : s) {
2434      LOG.info("getMetaTableRows: row -> " + Bytes.toStringBinary(result.getRow()));
2435      rows.add(result.getRow());
2436    }
2437    s.close();
2438    t.close();
2439    return rows;
2440  }
2441
2442  /**
2443   * Returns all rows from the hbase:meta table for a given user table
2444   * @throws IOException When reading the rows fails.
2445   */
2446  public List<byte[]> getMetaTableRows(TableName tableName) throws IOException {
2447    // TODO: Redo using MetaTableAccessor.
2448    Table t = getConnection().getTable(TableName.META_TABLE_NAME);
2449    List<byte[]> rows = new ArrayList<>();
2450    ResultScanner s = t.getScanner(new Scan());
2451    for (Result result : s) {
2452      RegionInfo info = CatalogFamilyFormat.getRegionInfo(result);
2453      if (info == null) {
2454        LOG.error("No region info for row " + Bytes.toString(result.getRow()));
2455        // TODO figure out what to do for this new hosed case.
2456        continue;
2457      }
2458
2459      if (info.getTable().equals(tableName)) {
2460        LOG.info("getMetaTableRows: row -> " + Bytes.toStringBinary(result.getRow()) + info);
2461        rows.add(result.getRow());
2462      }
2463    }
2464    s.close();
2465    t.close();
2466    return rows;
2467  }
2468
2469  /**
2470   * Returns all regions of the specified table
2471   * @param tableName the table name
2472   * @return all regions of the specified table
2473   * @throws IOException when getting the regions fails.
2474   */
2475  private List<RegionInfo> getRegions(TableName tableName) throws IOException {
2476    try (Admin admin = getConnection().getAdmin()) {
2477      return admin.getRegions(tableName);
2478    }
2479  }
2480
2481  /*
2482   * Find any other region server which is different from the one identified by parameter n
2483   * * @return another region server
2484   */
2485  public HRegionServer getOtherRegionServer(HRegionServer rs) {
2486    for (JVMClusterUtil.RegionServerThread rst : getMiniHBaseCluster().getRegionServerThreads()) {
2487      if (!(rst.getRegionServer() == rs)) {
2488        return rst.getRegionServer();
2489      }
2490    }
2491    return null;
2492  }
2493
2494  /**
2495   * Tool to get the reference to the region server object that holds the region of the specified
2496   * user table.
2497   * @param tableName user table to lookup in hbase:meta
2498   * @return region server that holds it, null if the row doesn't exist nn
2499   */
2500  public HRegionServer getRSForFirstRegionInTable(TableName tableName)
2501    throws IOException, InterruptedException {
2502    List<RegionInfo> regions = getRegions(tableName);
2503    if (regions == null || regions.isEmpty()) {
2504      return null;
2505    }
2506    LOG.debug("Found " + regions.size() + " regions for table " + tableName);
2507
2508    byte[] firstRegionName =
2509      regions.stream().filter(r -> !r.isOffline()).map(RegionInfo::getRegionName).findFirst()
2510        .orElseThrow(() -> new IOException("online regions not found in table " + tableName));
2511
2512    LOG.debug("firstRegionName=" + Bytes.toString(firstRegionName));
2513    long pause = getConfiguration().getLong(HConstants.HBASE_CLIENT_PAUSE,
2514      HConstants.DEFAULT_HBASE_CLIENT_PAUSE);
2515    int numRetries = getConfiguration().getInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER,
2516      HConstants.DEFAULT_HBASE_CLIENT_RETRIES_NUMBER);
2517    RetryCounter retrier = new RetryCounter(numRetries + 1, (int) pause, TimeUnit.MICROSECONDS);
2518    while (retrier.shouldRetry()) {
2519      int index = getMiniHBaseCluster().getServerWith(firstRegionName);
2520      if (index != -1) {
2521        return getMiniHBaseCluster().getRegionServerThreads().get(index).getRegionServer();
2522      }
2523      // Came back -1. Region may not be online yet. Sleep a while.
2524      retrier.sleepUntilNextRetry();
2525    }
2526    return null;
2527  }
2528
2529  /**
2530   * Starts a <code>MiniMRCluster</code> with a default number of <code>TaskTracker</code>'s.
2531   * @throws IOException When starting the cluster fails.
2532   */
2533  public MiniMRCluster startMiniMapReduceCluster() throws IOException {
2534    // Set a very high max-disk-utilization percentage to avoid the NodeManagers from failing.
2535    conf.setIfUnset("yarn.nodemanager.disk-health-checker.max-disk-utilization-per-disk-percentage",
2536      "99.0");
2537    startMiniMapReduceCluster(2);
2538    return mrCluster;
2539  }
2540
2541  /**
2542   * Tasktracker has a bug where changing the hadoop.log.dir system property will not change its
2543   * internal static LOG_DIR variable.
2544   */
2545  private void forceChangeTaskLogDir() {
2546    Field logDirField;
2547    try {
2548      logDirField = TaskLog.class.getDeclaredField("LOG_DIR");
2549      logDirField.setAccessible(true);
2550
2551      Field modifiersField = ReflectionUtils.getModifiersField();
2552      modifiersField.setAccessible(true);
2553      modifiersField.setInt(logDirField, logDirField.getModifiers() & ~Modifier.FINAL);
2554
2555      logDirField.set(null, new File(hadoopLogDir, "userlogs"));
2556    } catch (SecurityException e) {
2557      throw new RuntimeException(e);
2558    } catch (NoSuchFieldException e) {
2559      throw new RuntimeException(e);
2560    } catch (IllegalArgumentException e) {
2561      throw new RuntimeException(e);
2562    } catch (IllegalAccessException e) {
2563      throw new RuntimeException(e);
2564    }
2565  }
2566
2567  /**
2568   * Starts a <code>MiniMRCluster</code>. Call {@link #setFileSystemURI(String)} to use a different
2569   * filesystem.
2570   * @param servers The number of <code>TaskTracker</code>'s to start.
2571   * @throws IOException When starting the cluster fails.
2572   */
2573  private void startMiniMapReduceCluster(final int servers) throws IOException {
2574    if (mrCluster != null) {
2575      throw new IllegalStateException("MiniMRCluster is already running");
2576    }
2577    LOG.info("Starting mini mapreduce cluster...");
2578    setupClusterTestDir();
2579    createDirsAndSetProperties();
2580
2581    forceChangeTaskLogDir();
2582
2583    //// hadoop2 specific settings
2584    // Tests were failing because this process used 6GB of virtual memory and was getting killed.
2585    // we up the VM usable so that processes don't get killed.
2586    conf.setFloat("yarn.nodemanager.vmem-pmem-ratio", 8.0f);
2587
2588    // Tests were failing due to MAPREDUCE-4880 / MAPREDUCE-4607 against hadoop 2.0.2-alpha and
2589    // this avoids the problem by disabling speculative task execution in tests.
2590    conf.setBoolean("mapreduce.map.speculative", false);
2591    conf.setBoolean("mapreduce.reduce.speculative", false);
2592    ////
2593
2594    // Allow the user to override FS URI for this map-reduce cluster to use.
2595    mrCluster =
2596      new MiniMRCluster(servers, FS_URI != null ? FS_URI : FileSystem.get(conf).getUri().toString(),
2597        1, null, null, new JobConf(this.conf));
2598    JobConf jobConf = MapreduceTestingShim.getJobConf(mrCluster);
2599    if (jobConf == null) {
2600      jobConf = mrCluster.createJobConf();
2601    }
2602    // Hadoop MiniMR overwrites this while it should not
2603    jobConf.set("mapreduce.cluster.local.dir", conf.get("mapreduce.cluster.local.dir"));
2604    LOG.info("Mini mapreduce cluster started");
2605
2606    // In hadoop2, YARN/MR2 starts a mini cluster with its own conf instance and updates settings.
2607    // Our HBase MR jobs need several of these settings in order to properly run. So we copy the
2608    // necessary config properties here. YARN-129 required adding a few properties.
2609    conf.set("mapreduce.jobtracker.address", jobConf.get("mapreduce.jobtracker.address"));
2610    // this for mrv2 support; mr1 ignores this
2611    conf.set("mapreduce.framework.name", "yarn");
2612    conf.setBoolean("yarn.is.minicluster", true);
2613    String rmAddress = jobConf.get("yarn.resourcemanager.address");
2614    if (rmAddress != null) {
2615      conf.set("yarn.resourcemanager.address", rmAddress);
2616    }
2617    String historyAddress = jobConf.get("mapreduce.jobhistory.address");
2618    if (historyAddress != null) {
2619      conf.set("mapreduce.jobhistory.address", historyAddress);
2620    }
2621    String schedulerAddress = jobConf.get("yarn.resourcemanager.scheduler.address");
2622    if (schedulerAddress != null) {
2623      conf.set("yarn.resourcemanager.scheduler.address", schedulerAddress);
2624    }
2625    String mrJobHistoryWebappAddress = jobConf.get("mapreduce.jobhistory.webapp.address");
2626    if (mrJobHistoryWebappAddress != null) {
2627      conf.set("mapreduce.jobhistory.webapp.address", mrJobHistoryWebappAddress);
2628    }
2629    String yarnRMWebappAddress = jobConf.get("yarn.resourcemanager.webapp.address");
2630    if (yarnRMWebappAddress != null) {
2631      conf.set("yarn.resourcemanager.webapp.address", yarnRMWebappAddress);
2632    }
2633  }
2634
2635  /**
2636   * Stops the previously started <code>MiniMRCluster</code>.
2637   */
2638  public void shutdownMiniMapReduceCluster() {
2639    if (mrCluster != null) {
2640      LOG.info("Stopping mini mapreduce cluster...");
2641      mrCluster.shutdown();
2642      mrCluster = null;
2643      LOG.info("Mini mapreduce cluster stopped");
2644    }
2645    // Restore configuration to point to local jobtracker
2646    conf.set("mapreduce.jobtracker.address", "local");
2647  }
2648
2649  /**
2650   * Create a stubbed out RegionServerService, mainly for getting FS.
2651   */
2652  public RegionServerServices createMockRegionServerService() throws IOException {
2653    return createMockRegionServerService((ServerName) null);
2654  }
2655
2656  /**
2657   * Create a stubbed out RegionServerService, mainly for getting FS. This version is used by
2658   * TestTokenAuthentication
2659   */
2660  public RegionServerServices createMockRegionServerService(RpcServerInterface rpc)
2661    throws IOException {
2662    final MockRegionServerServices rss = new MockRegionServerServices(getZooKeeperWatcher());
2663    rss.setFileSystem(getTestFileSystem());
2664    rss.setRpcServer(rpc);
2665    return rss;
2666  }
2667
2668  /**
2669   * Create a stubbed out RegionServerService, mainly for getting FS. This version is used by
2670   * TestOpenRegionHandler
2671   */
2672  public RegionServerServices createMockRegionServerService(ServerName name) throws IOException {
2673    final MockRegionServerServices rss = new MockRegionServerServices(getZooKeeperWatcher(), name);
2674    rss.setFileSystem(getTestFileSystem());
2675    return rss;
2676  }
2677
2678  /**
2679   * Switches the logger for the given class to DEBUG level.
2680   * @param clazz The class for which to switch to debug logging.
2681   * @deprecated In 2.3.0, will be removed in 4.0.0. Only support changing log level on log4j now as
2682   *             HBase only uses log4j. You should do this by your own as it you know which log
2683   *             framework you are using then set the log level to debug is very easy.
2684   */
2685  @Deprecated
2686  public void enableDebug(Class<?> clazz) {
2687    Log4jUtils.enableDebug(clazz);
2688  }
2689
2690  /**
2691   * Expire the Master's session n
2692   */
2693  public void expireMasterSession() throws Exception {
2694    HMaster master = getMiniHBaseCluster().getMaster();
2695    expireSession(master.getZooKeeper(), false);
2696  }
2697
2698  /**
2699   * Expire a region server's session
2700   * @param index which RS
2701   */
2702  public void expireRegionServerSession(int index) throws Exception {
2703    HRegionServer rs = getMiniHBaseCluster().getRegionServer(index);
2704    expireSession(rs.getZooKeeper(), false);
2705    decrementMinRegionServerCount();
2706  }
2707
2708  private void decrementMinRegionServerCount() {
2709    // decrement the count for this.conf, for newly spwaned master
2710    // this.hbaseCluster shares this configuration too
2711    decrementMinRegionServerCount(getConfiguration());
2712
2713    // each master thread keeps a copy of configuration
2714    for (MasterThread master : getHBaseCluster().getMasterThreads()) {
2715      decrementMinRegionServerCount(master.getMaster().getConfiguration());
2716    }
2717  }
2718
2719  private void decrementMinRegionServerCount(Configuration conf) {
2720    int currentCount = conf.getInt(ServerManager.WAIT_ON_REGIONSERVERS_MINTOSTART, -1);
2721    if (currentCount != -1) {
2722      conf.setInt(ServerManager.WAIT_ON_REGIONSERVERS_MINTOSTART, Math.max(currentCount - 1, 1));
2723    }
2724  }
2725
2726  public void expireSession(ZKWatcher nodeZK) throws Exception {
2727    expireSession(nodeZK, false);
2728  }
2729
2730  /**
2731   * Expire a ZooKeeper session as recommended in ZooKeeper documentation
2732   * http://hbase.apache.org/book.html#trouble.zookeeper There are issues when doing this: [1]
2733   * http://www.mail-archive.com/dev@zookeeper.apache.org/msg01942.html [2]
2734   * https://issues.apache.org/jira/browse/ZOOKEEPER-1105
2735   * @param nodeZK      - the ZK watcher to expire
2736   * @param checkStatus - true to check if we can create a Table with the current configuration.
2737   */
2738  public void expireSession(ZKWatcher nodeZK, boolean checkStatus) throws Exception {
2739    Configuration c = new Configuration(this.conf);
2740    String quorumServers = ZKConfig.getZKQuorumServersString(c);
2741    ZooKeeper zk = nodeZK.getRecoverableZooKeeper().getZooKeeper();
2742    byte[] password = zk.getSessionPasswd();
2743    long sessionID = zk.getSessionId();
2744
2745    // Expiry seems to be asynchronous (see comment from P. Hunt in [1]),
2746    // so we create a first watcher to be sure that the
2747    // event was sent. We expect that if our watcher receives the event
2748    // other watchers on the same machine will get is as well.
2749    // When we ask to close the connection, ZK does not close it before
2750    // we receive all the events, so don't have to capture the event, just
2751    // closing the connection should be enough.
2752    ZooKeeper monitor = new ZooKeeper(quorumServers, 1000, new org.apache.zookeeper.Watcher() {
2753      @Override
2754      public void process(WatchedEvent watchedEvent) {
2755        LOG.info("Monitor ZKW received event=" + watchedEvent);
2756      }
2757    }, sessionID, password);
2758
2759    // Making it expire
2760    ZooKeeper newZK =
2761      new ZooKeeper(quorumServers, 1000, EmptyWatcher.instance, sessionID, password);
2762
2763    // ensure that we have connection to the server before closing down, otherwise
2764    // the close session event will be eaten out before we start CONNECTING state
2765    long start = EnvironmentEdgeManager.currentTime();
2766    while (
2767      newZK.getState() != States.CONNECTED && EnvironmentEdgeManager.currentTime() - start < 1000
2768    ) {
2769      Thread.sleep(1);
2770    }
2771    newZK.close();
2772    LOG.info("ZK Closed Session 0x" + Long.toHexString(sessionID));
2773
2774    // Now closing & waiting to be sure that the clients get it.
2775    monitor.close();
2776
2777    if (checkStatus) {
2778      getConnection().getTable(TableName.META_TABLE_NAME).close();
2779    }
2780  }
2781
2782  /**
2783   * Get the Mini HBase cluster.
2784   * @return hbase cluster
2785   * @see #getHBaseClusterInterface()
2786   */
2787  public MiniHBaseCluster getHBaseCluster() {
2788    return getMiniHBaseCluster();
2789  }
2790
2791  /**
2792   * Returns the HBaseCluster instance.
2793   * <p>
2794   * Returned object can be any of the subclasses of HBaseCluster, and the tests referring this
2795   * should not assume that the cluster is a mini cluster or a distributed one. If the test only
2796   * works on a mini cluster, then specific method {@link #getMiniHBaseCluster()} can be used
2797   * instead w/o the need to type-cast.
2798   */
2799  public HBaseCluster getHBaseClusterInterface() {
2800    // implementation note: we should rename this method as #getHBaseCluster(),
2801    // but this would require refactoring 90+ calls.
2802    return hbaseCluster;
2803  }
2804
2805  /**
2806   * Resets the connections so that the next time getConnection() is called, a new connection is
2807   * created. This is needed in cases where the entire cluster / all the masters are shutdown and
2808   * the connection is not valid anymore. TODO: There should be a more coherent way of doing this.
2809   * Unfortunately the way tests are written, not all start() stop() calls go through this class.
2810   * Most tests directly operate on the underlying mini/local hbase cluster. That makes it difficult
2811   * for this wrapper class to maintain the connection state automatically. Cleaning this is a much
2812   * bigger refactor.
2813   */
2814  public void invalidateConnection() throws IOException {
2815    closeConnection();
2816    // Update the master addresses if they changed.
2817    final String masterConfigBefore = conf.get(HConstants.MASTER_ADDRS_KEY);
2818    final String masterConfAfter = getMiniHBaseCluster().conf.get(HConstants.MASTER_ADDRS_KEY);
2819    LOG.info("Invalidated connection. Updating master addresses before: {} after: {}",
2820      masterConfigBefore, masterConfAfter);
2821    conf.set(HConstants.MASTER_ADDRS_KEY,
2822      getMiniHBaseCluster().conf.get(HConstants.MASTER_ADDRS_KEY));
2823  }
2824
2825  /**
2826   * Get a shared Connection to the cluster. this method is thread safe.
2827   * @return A Connection that can be shared. Don't close. Will be closed on shutdown of cluster.
2828   */
2829  public Connection getConnection() throws IOException {
2830    return getAsyncConnection().toConnection();
2831  }
2832
2833  /**
2834   * Get a assigned Connection to the cluster. this method is thread safe.
2835   * @param user assigned user
2836   * @return A Connection with assigned user.
2837   */
2838  public Connection getConnection(User user) throws IOException {
2839    return getAsyncConnection(user).toConnection();
2840  }
2841
2842  /**
2843   * Get a shared AsyncClusterConnection to the cluster. this method is thread safe.
2844   * @return An AsyncClusterConnection that can be shared. Don't close. Will be closed on shutdown
2845   *         of cluster.
2846   */
2847  public AsyncClusterConnection getAsyncConnection() throws IOException {
2848    try {
2849      return asyncConnection.updateAndGet(connection -> {
2850        if (connection == null) {
2851          try {
2852            User user = UserProvider.instantiate(conf).getCurrent();
2853            connection = getAsyncConnection(user);
2854          } catch (IOException ioe) {
2855            throw new UncheckedIOException("Failed to create connection", ioe);
2856          }
2857        }
2858        return connection;
2859      });
2860    } catch (UncheckedIOException exception) {
2861      throw exception.getCause();
2862    }
2863  }
2864
2865  /**
2866   * Get a assigned AsyncClusterConnection to the cluster. this method is thread safe.
2867   * @param user assigned user
2868   * @return An AsyncClusterConnection with assigned user.
2869   */
2870  public AsyncClusterConnection getAsyncConnection(User user) throws IOException {
2871    return ClusterConnectionFactory.createAsyncClusterConnection(conf, null, user);
2872  }
2873
2874  public void closeConnection() throws IOException {
2875    if (hbaseAdmin != null) {
2876      Closeables.close(hbaseAdmin, true);
2877      hbaseAdmin = null;
2878    }
2879    AsyncClusterConnection asyncConnection = this.asyncConnection.getAndSet(null);
2880    if (asyncConnection != null) {
2881      Closeables.close(asyncConnection, true);
2882    }
2883  }
2884
2885  /**
2886   * Returns an Admin instance which is shared between HBaseTestingUtility instance users. Closing
2887   * it has no effect, it will be closed automatically when the cluster shutdowns
2888   */
2889  public Admin getAdmin() throws IOException {
2890    if (hbaseAdmin == null) {
2891      this.hbaseAdmin = getConnection().getAdmin();
2892    }
2893    return hbaseAdmin;
2894  }
2895
2896  private Admin hbaseAdmin = null;
2897
2898  /**
2899   * Returns an {@link Hbck} instance. Needs be closed when done.
2900   */
2901  public Hbck getHbck() throws IOException {
2902    return getConnection().getHbck();
2903  }
2904
2905  /**
2906   * Unassign the named region.
2907   * @param regionName The region to unassign.
2908   */
2909  public void unassignRegion(String regionName) throws IOException {
2910    unassignRegion(Bytes.toBytes(regionName));
2911  }
2912
2913  /**
2914   * Unassign the named region.
2915   * @param regionName The region to unassign.
2916   */
2917  public void unassignRegion(byte[] regionName) throws IOException {
2918    getAdmin().unassign(regionName, true);
2919  }
2920
2921  /**
2922   * Closes the region containing the given row.
2923   * @param row   The row to find the containing region.
2924   * @param table The table to find the region.
2925   */
2926  public void unassignRegionByRow(String row, RegionLocator table) throws IOException {
2927    unassignRegionByRow(Bytes.toBytes(row), table);
2928  }
2929
2930  /**
2931   * Closes the region containing the given row.
2932   * @param row   The row to find the containing region.
2933   * @param table The table to find the region. n
2934   */
2935  public void unassignRegionByRow(byte[] row, RegionLocator table) throws IOException {
2936    HRegionLocation hrl = table.getRegionLocation(row);
2937    unassignRegion(hrl.getRegion().getRegionName());
2938  }
2939
2940  /**
2941   * Retrieves a splittable region randomly from tableName
2942   * @param tableName   name of table
2943   * @param maxAttempts maximum number of attempts, unlimited for value of -1
2944   * @return the HRegion chosen, null if none was found within limit of maxAttempts
2945   */
2946  public HRegion getSplittableRegion(TableName tableName, int maxAttempts) {
2947    List<HRegion> regions = getHBaseCluster().getRegions(tableName);
2948    int regCount = regions.size();
2949    Set<Integer> attempted = new HashSet<>();
2950    int idx;
2951    int attempts = 0;
2952    do {
2953      regions = getHBaseCluster().getRegions(tableName);
2954      if (regCount != regions.size()) {
2955        // if there was region movement, clear attempted Set
2956        attempted.clear();
2957      }
2958      regCount = regions.size();
2959      // There are chances that before we get the region for the table from an RS the region may
2960      // be going for CLOSE. This may be because online schema change is enabled
2961      if (regCount > 0) {
2962        idx = ThreadLocalRandom.current().nextInt(regCount);
2963        // if we have just tried this region, there is no need to try again
2964        if (attempted.contains(idx)) {
2965          continue;
2966        }
2967        HRegion region = regions.get(idx);
2968        if (region.checkSplit().isPresent()) {
2969          return region;
2970        }
2971        attempted.add(idx);
2972      }
2973      attempts++;
2974    } while (maxAttempts == -1 || attempts < maxAttempts);
2975    return null;
2976  }
2977
2978  public MiniDFSCluster getDFSCluster() {
2979    return dfsCluster;
2980  }
2981
2982  public void setDFSCluster(MiniDFSCluster cluster) throws IllegalStateException, IOException {
2983    setDFSCluster(cluster, true);
2984  }
2985
2986  /**
2987   * Set the MiniDFSCluster
2988   * @param cluster     cluster to use
2989   * @param requireDown require the that cluster not be "up" (MiniDFSCluster#isClusterUp) before it
2990   *                    is set.
2991   * @throws IllegalStateException if the passed cluster is up when it is required to be down
2992   * @throws IOException           if the FileSystem could not be set from the passed dfs cluster
2993   */
2994  public void setDFSCluster(MiniDFSCluster cluster, boolean requireDown)
2995    throws IllegalStateException, IOException {
2996    if (dfsCluster != null && requireDown && dfsCluster.isClusterUp()) {
2997      throw new IllegalStateException("DFSCluster is already running! Shut it down first.");
2998    }
2999    this.dfsCluster = cluster;
3000    this.setFs();
3001  }
3002
3003  public FileSystem getTestFileSystem() throws IOException {
3004    return HFileSystem.get(conf);
3005  }
3006
3007  /**
3008   * Wait until all regions in a table have been assigned. Waits default timeout before giving up
3009   * (30 seconds).
3010   * @param table Table to wait on. nn
3011   */
3012  public void waitTableAvailable(TableName table) throws InterruptedException, IOException {
3013    waitTableAvailable(table.getName(), 30000);
3014  }
3015
3016  public void waitTableAvailable(TableName table, long timeoutMillis)
3017    throws InterruptedException, IOException {
3018    waitFor(timeoutMillis, predicateTableAvailable(table));
3019  }
3020
3021  /**
3022   * Wait until all regions in a table have been assigned
3023   * @param table         Table to wait on.
3024   * @param timeoutMillis Timeout.
3025   */
3026  public void waitTableAvailable(byte[] table, long timeoutMillis)
3027    throws InterruptedException, IOException {
3028    waitFor(timeoutMillis, predicateTableAvailable(TableName.valueOf(table)));
3029  }
3030
3031  public String explainTableAvailability(TableName tableName) throws IOException {
3032    StringBuilder msg =
3033      new StringBuilder(explainTableState(tableName, TableState.State.ENABLED)).append(", ");
3034    if (getHBaseCluster().getMaster().isAlive()) {
3035      Map<RegionInfo, ServerName> assignments = getHBaseCluster().getMaster().getAssignmentManager()
3036        .getRegionStates().getRegionAssignments();
3037      final List<Pair<RegionInfo, ServerName>> metaLocations =
3038        MetaTableAccessor.getTableRegionsAndLocations(getConnection(), tableName);
3039      for (Pair<RegionInfo, ServerName> metaLocation : metaLocations) {
3040        RegionInfo hri = metaLocation.getFirst();
3041        ServerName sn = metaLocation.getSecond();
3042        if (!assignments.containsKey(hri)) {
3043          msg.append(", region ").append(hri)
3044            .append(" not assigned, but found in meta, it expected to be on ").append(sn);
3045        } else if (sn == null) {
3046          msg.append(",  region ").append(hri).append(" assigned,  but has no server in meta");
3047        } else if (!sn.equals(assignments.get(hri))) {
3048          msg.append(",  region ").append(hri)
3049            .append(" assigned,  but has different servers in meta and AM ( ").append(sn)
3050            .append(" <> ").append(assignments.get(hri));
3051        }
3052      }
3053    }
3054    return msg.toString();
3055  }
3056
3057  public String explainTableState(final TableName table, TableState.State state)
3058    throws IOException {
3059    TableState tableState = MetaTableAccessor.getTableState(getConnection(), table);
3060    if (tableState == null) {
3061      return "TableState in META: No table state in META for table " + table
3062        + " last state in meta (including deleted is " + findLastTableState(table) + ")";
3063    } else if (!tableState.inStates(state)) {
3064      return "TableState in META: Not " + state + " state, but " + tableState;
3065    } else {
3066      return "TableState in META: OK";
3067    }
3068  }
3069
3070  public TableState findLastTableState(final TableName table) throws IOException {
3071    final AtomicReference<TableState> lastTableState = new AtomicReference<>(null);
3072    ClientMetaTableAccessor.Visitor visitor = new ClientMetaTableAccessor.Visitor() {
3073      @Override
3074      public boolean visit(Result r) throws IOException {
3075        if (!Arrays.equals(r.getRow(), table.getName())) {
3076          return false;
3077        }
3078        TableState state = CatalogFamilyFormat.getTableState(r);
3079        if (state != null) {
3080          lastTableState.set(state);
3081        }
3082        return true;
3083      }
3084    };
3085    MetaTableAccessor.scanMeta(getConnection(), null, null, ClientMetaTableAccessor.QueryType.TABLE,
3086      Integer.MAX_VALUE, visitor);
3087    return lastTableState.get();
3088  }
3089
3090  /**
3091   * Waits for a table to be 'enabled'. Enabled means that table is set as 'enabled' and the regions
3092   * have been all assigned. Will timeout after default period (30 seconds) Tolerates nonexistent
3093   * table.
3094   * @param table the table to wait on.
3095   * @throws InterruptedException if interrupted while waiting
3096   * @throws IOException          if an IO problem is encountered
3097   */
3098  public void waitTableEnabled(TableName table) throws InterruptedException, IOException {
3099    waitTableEnabled(table, 30000);
3100  }
3101
3102  /**
3103   * Waits for a table to be 'enabled'. Enabled means that table is set as 'enabled' and the regions
3104   * have been all assigned.
3105   * @see #waitTableEnabled(TableName, long)
3106   * @param table         Table to wait on.
3107   * @param timeoutMillis Time to wait on it being marked enabled. nn
3108   */
3109  public void waitTableEnabled(byte[] table, long timeoutMillis)
3110    throws InterruptedException, IOException {
3111    waitTableEnabled(TableName.valueOf(table), timeoutMillis);
3112  }
3113
3114  public void waitTableEnabled(TableName table, long timeoutMillis) throws IOException {
3115    waitFor(timeoutMillis, predicateTableEnabled(table));
3116  }
3117
3118  /**
3119   * Waits for a table to be 'disabled'. Disabled means that table is set as 'disabled' Will timeout
3120   * after default period (30 seconds)
3121   * @param table Table to wait on. nn
3122   */
3123  public void waitTableDisabled(byte[] table) throws InterruptedException, IOException {
3124    waitTableDisabled(table, 30000);
3125  }
3126
3127  public void waitTableDisabled(TableName table, long millisTimeout)
3128    throws InterruptedException, IOException {
3129    waitFor(millisTimeout, predicateTableDisabled(table));
3130  }
3131
3132  /**
3133   * Waits for a table to be 'disabled'. Disabled means that table is set as 'disabled'
3134   * @param table         Table to wait on.
3135   * @param timeoutMillis Time to wait on it being marked disabled. nn
3136   */
3137  public void waitTableDisabled(byte[] table, long timeoutMillis)
3138    throws InterruptedException, IOException {
3139    waitTableDisabled(TableName.valueOf(table), timeoutMillis);
3140  }
3141
3142  /**
3143   * Make sure that at least the specified number of region servers are running
3144   * @param num minimum number of region servers that should be running
3145   * @return true if we started some servers n
3146   */
3147  public boolean ensureSomeRegionServersAvailable(final int num) throws IOException {
3148    boolean startedServer = false;
3149    MiniHBaseCluster hbaseCluster = getMiniHBaseCluster();
3150    for (int i = hbaseCluster.getLiveRegionServerThreads().size(); i < num; ++i) {
3151      LOG.info("Started new server=" + hbaseCluster.startRegionServer());
3152      startedServer = true;
3153    }
3154
3155    return startedServer;
3156  }
3157
3158  /**
3159   * Make sure that at least the specified number of region servers are running. We don't count the
3160   * ones that are currently stopping or are stopped.
3161   * @param num minimum number of region servers that should be running
3162   * @return true if we started some servers n
3163   */
3164  public boolean ensureSomeNonStoppedRegionServersAvailable(final int num) throws IOException {
3165    boolean startedServer = ensureSomeRegionServersAvailable(num);
3166
3167    int nonStoppedServers = 0;
3168    for (JVMClusterUtil.RegionServerThread rst : getMiniHBaseCluster().getRegionServerThreads()) {
3169
3170      HRegionServer hrs = rst.getRegionServer();
3171      if (hrs.isStopping() || hrs.isStopped()) {
3172        LOG.info("A region server is stopped or stopping:" + hrs);
3173      } else {
3174        nonStoppedServers++;
3175      }
3176    }
3177    for (int i = nonStoppedServers; i < num; ++i) {
3178      LOG.info("Started new server=" + getMiniHBaseCluster().startRegionServer());
3179      startedServer = true;
3180    }
3181    return startedServer;
3182  }
3183
3184  /**
3185   * This method clones the passed <code>c</code> configuration setting a new user into the clone.
3186   * Use it getting new instances of FileSystem. Only works for DistributedFileSystem w/o Kerberos.
3187   * @param c                     Initial configuration
3188   * @param differentiatingSuffix Suffix to differentiate this user from others.
3189   * @return A new configuration instance with a different user set into it. n
3190   */
3191  public static User getDifferentUser(final Configuration c, final String differentiatingSuffix)
3192    throws IOException {
3193    FileSystem currentfs = FileSystem.get(c);
3194    if (!(currentfs instanceof DistributedFileSystem) || User.isHBaseSecurityEnabled(c)) {
3195      return User.getCurrent();
3196    }
3197    // Else distributed filesystem. Make a new instance per daemon. Below
3198    // code is taken from the AppendTestUtil over in hdfs.
3199    String username = User.getCurrent().getName() + differentiatingSuffix;
3200    User user = User.createUserForTesting(c, username, new String[] { "supergroup" });
3201    return user;
3202  }
3203
3204  public static NavigableSet<String> getAllOnlineRegions(MiniHBaseCluster cluster)
3205    throws IOException {
3206    NavigableSet<String> online = new TreeSet<>();
3207    for (RegionServerThread rst : cluster.getLiveRegionServerThreads()) {
3208      try {
3209        for (RegionInfo region : ProtobufUtil
3210          .getOnlineRegions(rst.getRegionServer().getRSRpcServices())) {
3211          online.add(region.getRegionNameAsString());
3212        }
3213      } catch (RegionServerStoppedException e) {
3214        // That's fine.
3215      }
3216    }
3217    return online;
3218  }
3219
3220  /**
3221   * Set maxRecoveryErrorCount in DFSClient. In 0.20 pre-append its hard-coded to 5 and makes tests
3222   * linger. Here is the exception you'll see:
3223   *
3224   * <pre>
3225   * 2010-06-15 11:52:28,511 WARN  [DataStreamer for file /hbase/.logs/wal.1276627923013 block
3226   * blk_928005470262850423_1021] hdfs.DFSClient$DFSOutputStream(2657): Error Recovery for block
3227   * blk_928005470262850423_1021 failed  because recovery from primary datanode 127.0.0.1:53683
3228   * failed 4 times.  Pipeline was 127.0.0.1:53687, 127.0.0.1:53683. Will retry...
3229   * </pre>
3230   *
3231   * @param stream A DFSClient.DFSOutputStream. nnnnn
3232   */
3233  public static void setMaxRecoveryErrorCount(final OutputStream stream, final int max) {
3234    try {
3235      Class<?>[] clazzes = DFSClient.class.getDeclaredClasses();
3236      for (Class<?> clazz : clazzes) {
3237        String className = clazz.getSimpleName();
3238        if (className.equals("DFSOutputStream")) {
3239          if (clazz.isInstance(stream)) {
3240            Field maxRecoveryErrorCountField =
3241              stream.getClass().getDeclaredField("maxRecoveryErrorCount");
3242            maxRecoveryErrorCountField.setAccessible(true);
3243            maxRecoveryErrorCountField.setInt(stream, max);
3244            break;
3245          }
3246        }
3247      }
3248    } catch (Exception e) {
3249      LOG.info("Could not set max recovery field", e);
3250    }
3251  }
3252
3253  /**
3254   * Uses directly the assignment manager to assign the region. and waits until the specified region
3255   * has completed assignment.
3256   * @return true if the region is assigned false otherwise.
3257   */
3258  public boolean assignRegion(final RegionInfo regionInfo)
3259    throws IOException, InterruptedException {
3260    final AssignmentManager am = getHBaseCluster().getMaster().getAssignmentManager();
3261    am.assign(regionInfo);
3262    return AssignmentTestingUtil.waitForAssignment(am, regionInfo);
3263  }
3264
3265  /**
3266   * Move region to destination server and wait till region is completely moved and online
3267   * @param destRegion region to move
3268   * @param destServer destination server of the region nn
3269   */
3270  public void moveRegionAndWait(RegionInfo destRegion, ServerName destServer)
3271    throws InterruptedException, IOException {
3272    HMaster master = getMiniHBaseCluster().getMaster();
3273    // TODO: Here we start the move. The move can take a while.
3274    getAdmin().move(destRegion.getEncodedNameAsBytes(), destServer);
3275    while (true) {
3276      ServerName serverName =
3277        master.getAssignmentManager().getRegionStates().getRegionServerOfRegion(destRegion);
3278      if (serverName != null && serverName.equals(destServer)) {
3279        assertRegionOnServer(destRegion, serverName, 2000);
3280        break;
3281      }
3282      Thread.sleep(10);
3283    }
3284  }
3285
3286  /**
3287   * Wait until all regions for a table in hbase:meta have a non-empty info:server, up to a
3288   * configuable timeout value (default is 60 seconds) This means all regions have been deployed,
3289   * master has been informed and updated hbase:meta with the regions deployed server.
3290   * @param tableName the table name n
3291   */
3292  public void waitUntilAllRegionsAssigned(final TableName tableName) throws IOException {
3293    waitUntilAllRegionsAssigned(tableName,
3294      this.conf.getLong("hbase.client.sync.wait.timeout.msec", 60000));
3295  }
3296
3297  /**
3298   * Waith until all system table's regions get assigned n
3299   */
3300  public void waitUntilAllSystemRegionsAssigned() throws IOException {
3301    waitUntilAllRegionsAssigned(TableName.META_TABLE_NAME);
3302  }
3303
3304  /**
3305   * Wait until all regions for a table in hbase:meta have a non-empty info:server, or until
3306   * timeout. This means all regions have been deployed, master has been informed and updated
3307   * hbase:meta with the regions deployed server.
3308   * @param tableName the table name
3309   * @param timeout   timeout, in milliseconds n
3310   */
3311  public void waitUntilAllRegionsAssigned(final TableName tableName, final long timeout)
3312    throws IOException {
3313    if (!TableName.isMetaTableName(tableName)) {
3314      try (final Table meta = getConnection().getTable(TableName.META_TABLE_NAME)) {
3315        LOG.debug("Waiting until all regions of table " + tableName + " get assigned. Timeout = "
3316          + timeout + "ms");
3317        waitFor(timeout, 200, true, new ExplainingPredicate<IOException>() {
3318          @Override
3319          public String explainFailure() throws IOException {
3320            return explainTableAvailability(tableName);
3321          }
3322
3323          @Override
3324          public boolean evaluate() throws IOException {
3325            Scan scan = new Scan();
3326            scan.addFamily(HConstants.CATALOG_FAMILY);
3327            boolean tableFound = false;
3328            try (ResultScanner s = meta.getScanner(scan)) {
3329              for (Result r; (r = s.next()) != null;) {
3330                byte[] b = r.getValue(HConstants.CATALOG_FAMILY, HConstants.REGIONINFO_QUALIFIER);
3331                RegionInfo info = RegionInfo.parseFromOrNull(b);
3332                if (info != null && info.getTable().equals(tableName)) {
3333                  // Get server hosting this region from catalog family. Return false if no server
3334                  // hosting this region, or if the server hosting this region was recently killed
3335                  // (for fault tolerance testing).
3336                  tableFound = true;
3337                  byte[] server =
3338                    r.getValue(HConstants.CATALOG_FAMILY, HConstants.SERVER_QUALIFIER);
3339                  if (server == null) {
3340                    return false;
3341                  } else {
3342                    byte[] startCode =
3343                      r.getValue(HConstants.CATALOG_FAMILY, HConstants.STARTCODE_QUALIFIER);
3344                    ServerName serverName =
3345                      ServerName.valueOf(Bytes.toString(server).replaceFirst(":", ",") + ","
3346                        + Bytes.toLong(startCode));
3347                    if (
3348                      !getHBaseClusterInterface().isDistributedCluster()
3349                        && getHBaseCluster().isKilledRS(serverName)
3350                    ) {
3351                      return false;
3352                    }
3353                  }
3354                  if (RegionStateStore.getRegionState(r, info) != RegionState.State.OPEN) {
3355                    return false;
3356                  }
3357                }
3358              }
3359            }
3360            if (!tableFound) {
3361              LOG.warn(
3362                "Didn't find the entries for table " + tableName + " in meta, already deleted?");
3363            }
3364            return tableFound;
3365          }
3366        });
3367      }
3368    }
3369    LOG.info("All regions for table " + tableName + " assigned to meta. Checking AM states.");
3370    // check from the master state if we are using a mini cluster
3371    if (!getHBaseClusterInterface().isDistributedCluster()) {
3372      // So, all regions are in the meta table but make sure master knows of the assignments before
3373      // returning -- sometimes this can lag.
3374      HMaster master = getHBaseCluster().getMaster();
3375      final RegionStates states = master.getAssignmentManager().getRegionStates();
3376      waitFor(timeout, 200, new ExplainingPredicate<IOException>() {
3377        @Override
3378        public String explainFailure() throws IOException {
3379          return explainTableAvailability(tableName);
3380        }
3381
3382        @Override
3383        public boolean evaluate() throws IOException {
3384          List<RegionInfo> hris = states.getRegionsOfTable(tableName);
3385          return hris != null && !hris.isEmpty();
3386        }
3387      });
3388    }
3389    LOG.info("All regions for table " + tableName + " assigned.");
3390  }
3391
3392  /**
3393   * Do a small get/scan against one store. This is required because store has no actual methods of
3394   * querying itself, and relies on StoreScanner.
3395   */
3396  public static List<Cell> getFromStoreFile(HStore store, Get get) throws IOException {
3397    Scan scan = new Scan(get);
3398    InternalScanner scanner = (InternalScanner) store.getScanner(scan,
3399      scan.getFamilyMap().get(store.getColumnFamilyDescriptor().getName()),
3400      // originally MultiVersionConcurrencyControl.resetThreadReadPoint() was called to set
3401      // readpoint 0.
3402      0);
3403
3404    List<Cell> result = new ArrayList<>();
3405    scanner.next(result);
3406    if (!result.isEmpty()) {
3407      // verify that we are on the row we want:
3408      Cell kv = result.get(0);
3409      if (!CellUtil.matchingRows(kv, get.getRow())) {
3410        result.clear();
3411      }
3412    }
3413    scanner.close();
3414    return result;
3415  }
3416
3417  /**
3418   * Create region split keys between startkey and endKey nn * @param numRegions the number of
3419   * regions to be created. it has to be greater than 3.
3420   * @return resulting split keys
3421   */
3422  public byte[][] getRegionSplitStartKeys(byte[] startKey, byte[] endKey, int numRegions) {
3423    if (numRegions <= 3) {
3424      throw new AssertionError();
3425    }
3426    byte[][] tmpSplitKeys = Bytes.split(startKey, endKey, numRegions - 3);
3427    byte[][] result = new byte[tmpSplitKeys.length + 1][];
3428    System.arraycopy(tmpSplitKeys, 0, result, 1, tmpSplitKeys.length);
3429    result[0] = HConstants.EMPTY_BYTE_ARRAY;
3430    return result;
3431  }
3432
3433  /**
3434   * Do a small get/scan against one store. This is required because store has no actual methods of
3435   * querying itself, and relies on StoreScanner.
3436   */
3437  public static List<Cell> getFromStoreFile(HStore store, byte[] row, NavigableSet<byte[]> columns)
3438    throws IOException {
3439    Get get = new Get(row);
3440    Map<byte[], NavigableSet<byte[]>> s = get.getFamilyMap();
3441    s.put(store.getColumnFamilyDescriptor().getName(), columns);
3442
3443    return getFromStoreFile(store, get);
3444  }
3445
3446  public static void assertKVListsEqual(String additionalMsg, final List<? extends Cell> expected,
3447    final List<? extends Cell> actual) {
3448    final int eLen = expected.size();
3449    final int aLen = actual.size();
3450    final int minLen = Math.min(eLen, aLen);
3451
3452    int i;
3453    for (i = 0; i < minLen
3454      && CellComparator.getInstance().compare(expected.get(i), actual.get(i)) == 0; ++i) {
3455    }
3456
3457    if (additionalMsg == null) {
3458      additionalMsg = "";
3459    }
3460    if (!additionalMsg.isEmpty()) {
3461      additionalMsg = ". " + additionalMsg;
3462    }
3463
3464    if (eLen != aLen || i != minLen) {
3465      throw new AssertionError("Expected and actual KV arrays differ at position " + i + ": "
3466        + safeGetAsStr(expected, i) + " (length " + eLen + ") vs. " + safeGetAsStr(actual, i)
3467        + " (length " + aLen + ")" + additionalMsg);
3468    }
3469  }
3470
3471  public static <T> String safeGetAsStr(List<T> lst, int i) {
3472    if (0 <= i && i < lst.size()) {
3473      return lst.get(i).toString();
3474    } else {
3475      return "<out_of_range>";
3476    }
3477  }
3478
3479  public String getClusterKey() {
3480    return conf.get(HConstants.ZOOKEEPER_QUORUM) + ":" + conf.get(HConstants.ZOOKEEPER_CLIENT_PORT)
3481      + ":"
3482      + conf.get(HConstants.ZOOKEEPER_ZNODE_PARENT, HConstants.DEFAULT_ZOOKEEPER_ZNODE_PARENT);
3483  }
3484
3485  /** Creates a random table with the given parameters */
3486  public Table createRandomTable(TableName tableName, final Collection<String> families,
3487    final int maxVersions, final int numColsPerRow, final int numFlushes, final int numRegions,
3488    final int numRowsPerFlush) throws IOException, InterruptedException {
3489
3490    LOG.info("\n\nCreating random table " + tableName + " with " + numRegions + " regions, "
3491      + numFlushes + " storefiles per region, " + numRowsPerFlush + " rows per flush, maxVersions="
3492      + maxVersions + "\n");
3493
3494    final int numCF = families.size();
3495    final byte[][] cfBytes = new byte[numCF][];
3496    {
3497      int cfIndex = 0;
3498      for (String cf : families) {
3499        cfBytes[cfIndex++] = Bytes.toBytes(cf);
3500      }
3501    }
3502
3503    final int actualStartKey = 0;
3504    final int actualEndKey = Integer.MAX_VALUE;
3505    final int keysPerRegion = (actualEndKey - actualStartKey) / numRegions;
3506    final int splitStartKey = actualStartKey + keysPerRegion;
3507    final int splitEndKey = actualEndKey - keysPerRegion;
3508    final String keyFormat = "%08x";
3509    final Table table = createTable(tableName, cfBytes, maxVersions,
3510      Bytes.toBytes(String.format(keyFormat, splitStartKey)),
3511      Bytes.toBytes(String.format(keyFormat, splitEndKey)), numRegions);
3512
3513    if (hbaseCluster != null) {
3514      getMiniHBaseCluster().flushcache(TableName.META_TABLE_NAME);
3515    }
3516
3517    BufferedMutator mutator = getConnection().getBufferedMutator(tableName);
3518
3519    final Random rand = ThreadLocalRandom.current();
3520    for (int iFlush = 0; iFlush < numFlushes; ++iFlush) {
3521      for (int iRow = 0; iRow < numRowsPerFlush; ++iRow) {
3522        final byte[] row = Bytes.toBytes(
3523          String.format(keyFormat, actualStartKey + rand.nextInt(actualEndKey - actualStartKey)));
3524
3525        Put put = new Put(row);
3526        Delete del = new Delete(row);
3527        for (int iCol = 0; iCol < numColsPerRow; ++iCol) {
3528          final byte[] cf = cfBytes[rand.nextInt(numCF)];
3529          final long ts = rand.nextInt();
3530          final byte[] qual = Bytes.toBytes("col" + iCol);
3531          if (rand.nextBoolean()) {
3532            final byte[] value =
3533              Bytes.toBytes("value_for_row_" + iRow + "_cf_" + Bytes.toStringBinary(cf) + "_col_"
3534                + iCol + "_ts_" + ts + "_random_" + rand.nextLong());
3535            put.addColumn(cf, qual, ts, value);
3536          } else if (rand.nextDouble() < 0.8) {
3537            del.addColumn(cf, qual, ts);
3538          } else {
3539            del.addColumns(cf, qual, ts);
3540          }
3541        }
3542
3543        if (!put.isEmpty()) {
3544          mutator.mutate(put);
3545        }
3546
3547        if (!del.isEmpty()) {
3548          mutator.mutate(del);
3549        }
3550      }
3551      LOG.info("Initiating flush #" + iFlush + " for table " + tableName);
3552      mutator.flush();
3553      if (hbaseCluster != null) {
3554        getMiniHBaseCluster().flushcache(table.getName());
3555      }
3556    }
3557    mutator.close();
3558
3559    return table;
3560  }
3561
3562  public static int randomFreePort() {
3563    return HBaseCommonTestingUtility.randomFreePort();
3564  }
3565
3566  public static String randomMultiCastAddress() {
3567    return "226.1.1." + ThreadLocalRandom.current().nextInt(254);
3568  }
3569
3570  public static void waitForHostPort(String host, int port) throws IOException {
3571    final int maxTimeMs = 10000;
3572    final int maxNumAttempts = maxTimeMs / HConstants.SOCKET_RETRY_WAIT_MS;
3573    IOException savedException = null;
3574    LOG.info("Waiting for server at " + host + ":" + port);
3575    for (int attempt = 0; attempt < maxNumAttempts; ++attempt) {
3576      try {
3577        Socket sock = new Socket(InetAddress.getByName(host), port);
3578        sock.close();
3579        savedException = null;
3580        LOG.info("Server at " + host + ":" + port + " is available");
3581        break;
3582      } catch (UnknownHostException e) {
3583        throw new IOException("Failed to look up " + host, e);
3584      } catch (IOException e) {
3585        savedException = e;
3586      }
3587      Threads.sleepWithoutInterrupt(HConstants.SOCKET_RETRY_WAIT_MS);
3588    }
3589
3590    if (savedException != null) {
3591      throw savedException;
3592    }
3593  }
3594
3595  /**
3596   * Creates a pre-split table for load testing. If the table already exists, logs a warning and
3597   * continues.
3598   * @return the number of regions the table was split into
3599   */
3600  public static int createPreSplitLoadTestTable(Configuration conf, TableName tableName,
3601    byte[] columnFamily, Algorithm compression, DataBlockEncoding dataBlockEncoding)
3602    throws IOException {
3603    return createPreSplitLoadTestTable(conf, tableName, columnFamily, compression,
3604      dataBlockEncoding, DEFAULT_REGIONS_PER_SERVER, 1, Durability.USE_DEFAULT);
3605  }
3606
3607  /**
3608   * Creates a pre-split table for load testing. If the table already exists, logs a warning and
3609   * continues.
3610   * @return the number of regions the table was split into
3611   */
3612  public static int createPreSplitLoadTestTable(Configuration conf, TableName tableName,
3613    byte[] columnFamily, Algorithm compression, DataBlockEncoding dataBlockEncoding,
3614    int numRegionsPerServer, int regionReplication, Durability durability) throws IOException {
3615    TableDescriptorBuilder builder = TableDescriptorBuilder.newBuilder(tableName);
3616    builder.setDurability(durability);
3617    builder.setRegionReplication(regionReplication);
3618    ColumnFamilyDescriptorBuilder cfBuilder =
3619      ColumnFamilyDescriptorBuilder.newBuilder(columnFamily);
3620    cfBuilder.setDataBlockEncoding(dataBlockEncoding);
3621    cfBuilder.setCompressionType(compression);
3622    return createPreSplitLoadTestTable(conf, builder.build(), cfBuilder.build(),
3623      numRegionsPerServer);
3624  }
3625
3626  /**
3627   * Creates a pre-split table for load testing. If the table already exists, logs a warning and
3628   * continues.
3629   * @return the number of regions the table was split into
3630   */
3631  public static int createPreSplitLoadTestTable(Configuration conf, TableName tableName,
3632    byte[][] columnFamilies, Algorithm compression, DataBlockEncoding dataBlockEncoding,
3633    int numRegionsPerServer, int regionReplication, Durability durability) throws IOException {
3634    TableDescriptorBuilder builder = TableDescriptorBuilder.newBuilder(tableName);
3635    builder.setDurability(durability);
3636    builder.setRegionReplication(regionReplication);
3637    ColumnFamilyDescriptor[] hcds = new ColumnFamilyDescriptor[columnFamilies.length];
3638    for (int i = 0; i < columnFamilies.length; i++) {
3639      ColumnFamilyDescriptorBuilder cfBuilder =
3640        ColumnFamilyDescriptorBuilder.newBuilder(columnFamilies[i]);
3641      cfBuilder.setDataBlockEncoding(dataBlockEncoding);
3642      cfBuilder.setCompressionType(compression);
3643      hcds[i] = cfBuilder.build();
3644    }
3645    return createPreSplitLoadTestTable(conf, builder.build(), hcds, numRegionsPerServer);
3646  }
3647
3648  /**
3649   * Creates a pre-split table for load testing. If the table already exists, logs a warning and
3650   * continues.
3651   * @return the number of regions the table was split into
3652   */
3653  public static int createPreSplitLoadTestTable(Configuration conf, TableDescriptor desc,
3654    ColumnFamilyDescriptor hcd) throws IOException {
3655    return createPreSplitLoadTestTable(conf, desc, hcd, DEFAULT_REGIONS_PER_SERVER);
3656  }
3657
3658  /**
3659   * Creates a pre-split table for load testing. If the table already exists, logs a warning and
3660   * continues.
3661   * @return the number of regions the table was split into
3662   */
3663  public static int createPreSplitLoadTestTable(Configuration conf, TableDescriptor desc,
3664    ColumnFamilyDescriptor hcd, int numRegionsPerServer) throws IOException {
3665    return createPreSplitLoadTestTable(conf, desc, new ColumnFamilyDescriptor[] { hcd },
3666      numRegionsPerServer);
3667  }
3668
3669  /**
3670   * Creates a pre-split table for load testing. If the table already exists, logs a warning and
3671   * continues.
3672   * @return the number of regions the table was split into
3673   */
3674  public static int createPreSplitLoadTestTable(Configuration conf, TableDescriptor desc,
3675    ColumnFamilyDescriptor[] hcds, int numRegionsPerServer) throws IOException {
3676    return createPreSplitLoadTestTable(conf, desc, hcds, new RegionSplitter.HexStringSplit(),
3677      numRegionsPerServer);
3678  }
3679
3680  /**
3681   * Creates a pre-split table for load testing. If the table already exists, logs a warning and
3682   * continues.
3683   * @return the number of regions the table was split into
3684   */
3685  public static int createPreSplitLoadTestTable(Configuration conf, TableDescriptor td,
3686    ColumnFamilyDescriptor[] cds, SplitAlgorithm splitter, int numRegionsPerServer)
3687    throws IOException {
3688    TableDescriptorBuilder builder = TableDescriptorBuilder.newBuilder(td);
3689    for (ColumnFamilyDescriptor cd : cds) {
3690      if (!td.hasColumnFamily(cd.getName())) {
3691        builder.setColumnFamily(cd);
3692      }
3693    }
3694    td = builder.build();
3695    int totalNumberOfRegions = 0;
3696    Connection unmanagedConnection = ConnectionFactory.createConnection(conf);
3697    Admin admin = unmanagedConnection.getAdmin();
3698
3699    try {
3700      // create a table a pre-splits regions.
3701      // The number of splits is set as:
3702      // region servers * regions per region server).
3703      int numberOfServers = admin.getRegionServers().size();
3704      if (numberOfServers == 0) {
3705        throw new IllegalStateException("No live regionservers");
3706      }
3707
3708      totalNumberOfRegions = numberOfServers * numRegionsPerServer;
3709      LOG.info("Number of live regionservers: " + numberOfServers + ", "
3710        + "pre-splitting table into " + totalNumberOfRegions + " regions " + "(regions per server: "
3711        + numRegionsPerServer + ")");
3712
3713      byte[][] splits = splitter.split(totalNumberOfRegions);
3714
3715      admin.createTable(td, splits);
3716    } catch (MasterNotRunningException e) {
3717      LOG.error("Master not running", e);
3718      throw new IOException(e);
3719    } catch (TableExistsException e) {
3720      LOG.warn("Table " + td.getTableName() + " already exists, continuing");
3721    } finally {
3722      admin.close();
3723      unmanagedConnection.close();
3724    }
3725    return totalNumberOfRegions;
3726  }
3727
3728  public static int getMetaRSPort(Connection connection) throws IOException {
3729    try (RegionLocator locator = connection.getRegionLocator(TableName.META_TABLE_NAME)) {
3730      return locator.getRegionLocation(Bytes.toBytes("")).getPort();
3731    }
3732  }
3733
3734  /**
3735   * Due to async racing issue, a region may not be in the online region list of a region server
3736   * yet, after the assignment znode is deleted and the new assignment is recorded in master.
3737   */
3738  public void assertRegionOnServer(final RegionInfo hri, final ServerName server,
3739    final long timeout) throws IOException, InterruptedException {
3740    long timeoutTime = EnvironmentEdgeManager.currentTime() + timeout;
3741    while (true) {
3742      List<RegionInfo> regions = getAdmin().getRegions(server);
3743      if (regions.stream().anyMatch(r -> RegionInfo.COMPARATOR.compare(r, hri) == 0)) return;
3744      long now = EnvironmentEdgeManager.currentTime();
3745      if (now > timeoutTime) break;
3746      Thread.sleep(10);
3747    }
3748    throw new AssertionError(
3749      "Could not find region " + hri.getRegionNameAsString() + " on server " + server);
3750  }
3751
3752  /**
3753   * Check to make sure the region is open on the specified region server, but not on any other one.
3754   */
3755  public void assertRegionOnlyOnServer(final RegionInfo hri, final ServerName server,
3756    final long timeout) throws IOException, InterruptedException {
3757    long timeoutTime = EnvironmentEdgeManager.currentTime() + timeout;
3758    while (true) {
3759      List<RegionInfo> regions = getAdmin().getRegions(server);
3760      if (regions.stream().anyMatch(r -> RegionInfo.COMPARATOR.compare(r, hri) == 0)) {
3761        List<JVMClusterUtil.RegionServerThread> rsThreads =
3762          getHBaseCluster().getLiveRegionServerThreads();
3763        for (JVMClusterUtil.RegionServerThread rsThread : rsThreads) {
3764          HRegionServer rs = rsThread.getRegionServer();
3765          if (server.equals(rs.getServerName())) {
3766            continue;
3767          }
3768          Collection<HRegion> hrs = rs.getOnlineRegionsLocalContext();
3769          for (HRegion r : hrs) {
3770            if (r.getRegionInfo().getRegionId() == hri.getRegionId()) {
3771              throw new AssertionError("Region should not be double assigned");
3772            }
3773          }
3774        }
3775        return; // good, we are happy
3776      }
3777      long now = EnvironmentEdgeManager.currentTime();
3778      if (now > timeoutTime) break;
3779      Thread.sleep(10);
3780    }
3781    throw new AssertionError(
3782      "Could not find region " + hri.getRegionNameAsString() + " on server " + server);
3783  }
3784
3785  public HRegion createTestRegion(String tableName, ColumnFamilyDescriptor cd) throws IOException {
3786    TableDescriptor td =
3787      TableDescriptorBuilder.newBuilder(TableName.valueOf(tableName)).setColumnFamily(cd).build();
3788    RegionInfo info = RegionInfoBuilder.newBuilder(TableName.valueOf(tableName)).build();
3789    return createRegionAndWAL(info, getDataTestDir(), getConfiguration(), td);
3790  }
3791
3792  public HRegion createTestRegion(String tableName, ColumnFamilyDescriptor cd,
3793    BlockCache blockCache) throws IOException {
3794    TableDescriptor td =
3795      TableDescriptorBuilder.newBuilder(TableName.valueOf(tableName)).setColumnFamily(cd).build();
3796    RegionInfo info = RegionInfoBuilder.newBuilder(TableName.valueOf(tableName)).build();
3797    return createRegionAndWAL(info, getDataTestDir(), getConfiguration(), td, blockCache);
3798  }
3799
3800  public static void setFileSystemURI(String fsURI) {
3801    FS_URI = fsURI;
3802  }
3803
3804  /**
3805   * Returns a {@link Predicate} for checking that there are no regions in transition in master
3806   */
3807  public ExplainingPredicate<IOException> predicateNoRegionsInTransition() {
3808    return new ExplainingPredicate<IOException>() {
3809      @Override
3810      public String explainFailure() throws IOException {
3811        final RegionStates regionStates =
3812          getMiniHBaseCluster().getMaster().getAssignmentManager().getRegionStates();
3813        return "found in transition: " + regionStates.getRegionsInTransition().toString();
3814      }
3815
3816      @Override
3817      public boolean evaluate() throws IOException {
3818        HMaster master = getMiniHBaseCluster().getMaster();
3819        if (master == null) return false;
3820        AssignmentManager am = master.getAssignmentManager();
3821        if (am == null) return false;
3822        return !am.hasRegionsInTransition();
3823      }
3824    };
3825  }
3826
3827  /**
3828   * Returns a {@link Predicate} for checking that table is enabled
3829   */
3830  public Waiter.Predicate<IOException> predicateTableEnabled(final TableName tableName) {
3831    return new ExplainingPredicate<IOException>() {
3832      @Override
3833      public String explainFailure() throws IOException {
3834        return explainTableState(tableName, TableState.State.ENABLED);
3835      }
3836
3837      @Override
3838      public boolean evaluate() throws IOException {
3839        return getAdmin().tableExists(tableName) && getAdmin().isTableEnabled(tableName);
3840      }
3841    };
3842  }
3843
3844  /**
3845   * Returns a {@link Predicate} for checking that table is enabled
3846   */
3847  public Waiter.Predicate<IOException> predicateTableDisabled(final TableName tableName) {
3848    return new ExplainingPredicate<IOException>() {
3849      @Override
3850      public String explainFailure() throws IOException {
3851        return explainTableState(tableName, TableState.State.DISABLED);
3852      }
3853
3854      @Override
3855      public boolean evaluate() throws IOException {
3856        return getAdmin().isTableDisabled(tableName);
3857      }
3858    };
3859  }
3860
3861  /**
3862   * Returns a {@link Predicate} for checking that table is enabled
3863   */
3864  public Waiter.Predicate<IOException> predicateTableAvailable(final TableName tableName) {
3865    return new ExplainingPredicate<IOException>() {
3866      @Override
3867      public String explainFailure() throws IOException {
3868        return explainTableAvailability(tableName);
3869      }
3870
3871      @Override
3872      public boolean evaluate() throws IOException {
3873        boolean tableAvailable = getAdmin().isTableAvailable(tableName);
3874        if (tableAvailable) {
3875          try (Table table = getConnection().getTable(tableName)) {
3876            TableDescriptor htd = table.getDescriptor();
3877            for (HRegionLocation loc : getConnection().getRegionLocator(tableName)
3878              .getAllRegionLocations()) {
3879              Scan scan = new Scan().withStartRow(loc.getRegion().getStartKey())
3880                .withStopRow(loc.getRegion().getEndKey()).setOneRowLimit()
3881                .setMaxResultsPerColumnFamily(1).setCacheBlocks(false);
3882              for (byte[] family : htd.getColumnFamilyNames()) {
3883                scan.addFamily(family);
3884              }
3885              try (ResultScanner scanner = table.getScanner(scan)) {
3886                scanner.next();
3887              }
3888            }
3889          }
3890        }
3891        return tableAvailable;
3892      }
3893    };
3894  }
3895
3896  /**
3897   * Wait until no regions in transition.
3898   * @param timeout How long to wait. n
3899   */
3900  public void waitUntilNoRegionsInTransition(final long timeout) throws IOException {
3901    waitFor(timeout, predicateNoRegionsInTransition());
3902  }
3903
3904  /**
3905   * Wait until no regions in transition. (time limit 15min) n
3906   */
3907  public void waitUntilNoRegionsInTransition() throws IOException {
3908    waitUntilNoRegionsInTransition(15 * 60000);
3909  }
3910
3911  /**
3912   * Wait until labels is ready in VisibilityLabelsCache. nn
3913   */
3914  public void waitLabelAvailable(long timeoutMillis, final String... labels) {
3915    final VisibilityLabelsCache labelsCache = VisibilityLabelsCache.get();
3916    waitFor(timeoutMillis, new Waiter.ExplainingPredicate<RuntimeException>() {
3917
3918      @Override
3919      public boolean evaluate() {
3920        for (String label : labels) {
3921          if (labelsCache.getLabelOrdinal(label) == 0) {
3922            return false;
3923          }
3924        }
3925        return true;
3926      }
3927
3928      @Override
3929      public String explainFailure() {
3930        for (String label : labels) {
3931          if (labelsCache.getLabelOrdinal(label) == 0) {
3932            return label + " is not available yet";
3933          }
3934        }
3935        return "";
3936      }
3937    });
3938  }
3939
3940  /**
3941   * Create a set of column descriptors with the combination of compression, encoding, bloom codecs
3942   * available.
3943   * @return the list of column descriptors
3944   */
3945  public static List<ColumnFamilyDescriptor> generateColumnDescriptors() {
3946    return generateColumnDescriptors("");
3947  }
3948
3949  /**
3950   * Create a set of column descriptors with the combination of compression, encoding, bloom codecs
3951   * available.
3952   * @param prefix family names prefix
3953   * @return the list of column descriptors
3954   */
3955  public static List<ColumnFamilyDescriptor> generateColumnDescriptors(final String prefix) {
3956    List<ColumnFamilyDescriptor> columnFamilyDescriptors = new ArrayList<>();
3957    long familyId = 0;
3958    for (Compression.Algorithm compressionType : getSupportedCompressionAlgorithms()) {
3959      for (DataBlockEncoding encodingType : DataBlockEncoding.values()) {
3960        for (BloomType bloomType : BloomType.values()) {
3961          String name = String.format("%s-cf-!@#&-%d!@#", prefix, familyId);
3962          ColumnFamilyDescriptorBuilder columnFamilyDescriptorBuilder =
3963            ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes(name));
3964          columnFamilyDescriptorBuilder.setCompressionType(compressionType);
3965          columnFamilyDescriptorBuilder.setDataBlockEncoding(encodingType);
3966          columnFamilyDescriptorBuilder.setBloomFilterType(bloomType);
3967          columnFamilyDescriptors.add(columnFamilyDescriptorBuilder.build());
3968          familyId++;
3969        }
3970      }
3971    }
3972    return columnFamilyDescriptors;
3973  }
3974
3975  /**
3976   * Get supported compression algorithms.
3977   * @return supported compression algorithms.
3978   */
3979  public static Compression.Algorithm[] getSupportedCompressionAlgorithms() {
3980    String[] allAlgos = HFile.getSupportedCompressionAlgorithms();
3981    List<Compression.Algorithm> supportedAlgos = new ArrayList<>();
3982    for (String algoName : allAlgos) {
3983      try {
3984        Compression.Algorithm algo = Compression.getCompressionAlgorithmByName(algoName);
3985        algo.getCompressor();
3986        supportedAlgos.add(algo);
3987      } catch (Throwable t) {
3988        // this algo is not available
3989      }
3990    }
3991    return supportedAlgos.toArray(new Algorithm[supportedAlgos.size()]);
3992  }
3993
3994  public Result getClosestRowBefore(Region r, byte[] row, byte[] family) throws IOException {
3995    Scan scan = new Scan().withStartRow(row);
3996    scan.setReadType(ReadType.PREAD);
3997    scan.setCaching(1);
3998    scan.setReversed(true);
3999    scan.addFamily(family);
4000    try (RegionScanner scanner = r.getScanner(scan)) {
4001      List<Cell> cells = new ArrayList<>(1);
4002      scanner.next(cells);
4003      if (r.getRegionInfo().isMetaRegion() && !isTargetTable(row, cells.get(0))) {
4004        return null;
4005      }
4006      return Result.create(cells);
4007    }
4008  }
4009
4010  private boolean isTargetTable(final byte[] inRow, Cell c) {
4011    String inputRowString = Bytes.toString(inRow);
4012    int i = inputRowString.indexOf(HConstants.DELIMITER);
4013    String outputRowString = Bytes.toString(c.getRowArray(), c.getRowOffset(), c.getRowLength());
4014    int o = outputRowString.indexOf(HConstants.DELIMITER);
4015    return inputRowString.substring(0, i).equals(outputRowString.substring(0, o));
4016  }
4017
4018  /**
4019   * Sets up {@link MiniKdc} for testing security. Uses {@link HBaseKerberosUtils} to set the given
4020   * keytab file as {@link HBaseKerberosUtils#KRB_KEYTAB_FILE}. FYI, there is also the easier-to-use
4021   * kerby KDC server and utility for using it,
4022   * {@link org.apache.hadoop.hbase.util.SimpleKdcServerUtil}. The kerby KDC server is preferred;
4023   * less baggage. It came in in HBASE-5291.
4024   */
4025  public MiniKdc setupMiniKdc(File keytabFile) throws Exception {
4026    Properties conf = MiniKdc.createConf();
4027    conf.put(MiniKdc.DEBUG, true);
4028    MiniKdc kdc = null;
4029    File dir = null;
4030    // There is time lag between selecting a port and trying to bind with it. It's possible that
4031    // another service captures the port in between which'll result in BindException.
4032    boolean bindException;
4033    int numTries = 0;
4034    do {
4035      try {
4036        bindException = false;
4037        dir = new File(getDataTestDir("kdc").toUri().getPath());
4038        kdc = new MiniKdc(conf, dir);
4039        kdc.start();
4040      } catch (BindException e) {
4041        FileUtils.deleteDirectory(dir); // clean directory
4042        numTries++;
4043        if (numTries == 3) {
4044          LOG.error("Failed setting up MiniKDC. Tried " + numTries + " times.");
4045          throw e;
4046        }
4047        LOG.error("BindException encountered when setting up MiniKdc. Trying again.");
4048        bindException = true;
4049      }
4050    } while (bindException);
4051    HBaseKerberosUtils.setKeytabFileForTesting(keytabFile.getAbsolutePath());
4052    return kdc;
4053  }
4054
4055  public int getNumHFiles(final TableName tableName, final byte[] family) {
4056    int numHFiles = 0;
4057    for (RegionServerThread regionServerThread : getMiniHBaseCluster().getRegionServerThreads()) {
4058      numHFiles += getNumHFilesForRS(regionServerThread.getRegionServer(), tableName, family);
4059    }
4060    return numHFiles;
4061  }
4062
4063  public int getNumHFilesForRS(final HRegionServer rs, final TableName tableName,
4064    final byte[] family) {
4065    int numHFiles = 0;
4066    for (Region region : rs.getRegions(tableName)) {
4067      numHFiles += region.getStore(family).getStorefilesCount();
4068    }
4069    return numHFiles;
4070  }
4071
4072  private void assertEquals(String message, int expected, int actual) {
4073    if (expected == actual) {
4074      return;
4075    }
4076    String formatted = "";
4077    if (message != null && !"".equals(message)) {
4078      formatted = message + " ";
4079    }
4080    throw new AssertionError(formatted + "expected:<" + expected + "> but was:<" + actual + ">");
4081  }
4082
4083  public void verifyTableDescriptorIgnoreTableName(TableDescriptor ltd, TableDescriptor rtd) {
4084    if (ltd.getValues().hashCode() != rtd.getValues().hashCode()) {
4085      throw new AssertionError();
4086    }
4087    assertEquals("", ltd.getValues().hashCode(), rtd.getValues().hashCode());
4088    Collection<ColumnFamilyDescriptor> ltdFamilies = Arrays.asList(ltd.getColumnFamilies());
4089    Collection<ColumnFamilyDescriptor> rtdFamilies = Arrays.asList(rtd.getColumnFamilies());
4090    assertEquals("", ltdFamilies.size(), rtdFamilies.size());
4091    for (Iterator<ColumnFamilyDescriptor> it = ltdFamilies.iterator(),
4092        it2 = rtdFamilies.iterator(); it.hasNext();) {
4093      assertEquals("", 0, ColumnFamilyDescriptor.COMPARATOR.compare(it.next(), it2.next()));
4094    }
4095  }
4096
4097  /**
4098   * Await the successful return of {@code condition}, sleeping {@code sleepMillis} between
4099   * invocations.
4100   */
4101  public static void await(final long sleepMillis, final BooleanSupplier condition)
4102    throws InterruptedException {
4103    try {
4104      while (!condition.getAsBoolean()) {
4105        Thread.sleep(sleepMillis);
4106      }
4107    } catch (RuntimeException e) {
4108      if (e.getCause() instanceof AssertionError) {
4109        throw (AssertionError) e.getCause();
4110      }
4111      throw e;
4112    }
4113  }
4114}