001/**
002 * Licensed to the Apache Software Foundation (ASF) under one
003 * or more contributor license agreements.  See the NOTICE file
004 * distributed with this work for additional information
005 * regarding copyright ownership.  The ASF licenses this file
006 * to you under the Apache License, Version 2.0 (the
007 * "License"); you may not use this file except in compliance
008 * with the License.  You may obtain a copy of the License at
009 *
010 *     http://www.apache.org/licenses/LICENSE-2.0
011 *
012 * Unless required by applicable law or agreed to in writing, software
013 * distributed under the License is distributed on an "AS IS" BASIS,
014 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
015 * See the License for the specific language governing permissions and
016 * limitations under the License.
017 */
018package org.apache.hadoop.hbase;
019
020import static org.junit.Assert.assertEquals;
021import static org.junit.Assert.assertTrue;
022import static org.junit.Assert.fail;
023
024import edu.umd.cs.findbugs.annotations.Nullable;
025import java.io.File;
026import java.io.IOException;
027import java.io.OutputStream;
028import java.lang.reflect.Field;
029import java.lang.reflect.Modifier;
030import java.net.BindException;
031import java.net.DatagramSocket;
032import java.net.InetAddress;
033import java.net.ServerSocket;
034import java.net.Socket;
035import java.net.UnknownHostException;
036import java.nio.charset.StandardCharsets;
037import java.security.MessageDigest;
038import java.util.ArrayList;
039import java.util.Arrays;
040import java.util.Collection;
041import java.util.Collections;
042import java.util.HashSet;
043import java.util.Iterator;
044import java.util.List;
045import java.util.Map;
046import java.util.NavigableSet;
047import java.util.Properties;
048import java.util.Random;
049import java.util.Set;
050import java.util.TreeSet;
051import java.util.concurrent.TimeUnit;
052import java.util.concurrent.atomic.AtomicReference;
053import java.util.stream.Collectors;
054import java.util.function.BooleanSupplier;
055import org.apache.commons.io.FileUtils;
056import org.apache.commons.lang3.RandomStringUtils;
057import org.apache.hadoop.conf.Configuration;
058import org.apache.hadoop.fs.FileSystem;
059import org.apache.hadoop.fs.Path;
060import org.apache.hadoop.hbase.Waiter.ExplainingPredicate;
061import org.apache.hadoop.hbase.Waiter.Predicate;
062import org.apache.hadoop.hbase.client.Admin;
063import org.apache.hadoop.hbase.client.BufferedMutator;
064import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
065import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
066import org.apache.hadoop.hbase.client.Connection;
067import org.apache.hadoop.hbase.client.ConnectionFactory;
068import org.apache.hadoop.hbase.client.Consistency;
069import org.apache.hadoop.hbase.client.Delete;
070import org.apache.hadoop.hbase.client.Durability;
071import org.apache.hadoop.hbase.client.Get;
072import org.apache.hadoop.hbase.client.HBaseAdmin;
073import org.apache.hadoop.hbase.client.Hbck;
074import org.apache.hadoop.hbase.client.ImmutableHRegionInfo;
075import org.apache.hadoop.hbase.client.ImmutableHTableDescriptor;
076import org.apache.hadoop.hbase.client.MasterRegistry;
077import org.apache.hadoop.hbase.client.Put;
078import org.apache.hadoop.hbase.client.RegionInfo;
079import org.apache.hadoop.hbase.client.RegionInfoBuilder;
080import org.apache.hadoop.hbase.client.RegionLocator;
081import org.apache.hadoop.hbase.client.Result;
082import org.apache.hadoop.hbase.client.ResultScanner;
083import org.apache.hadoop.hbase.client.Scan;
084import org.apache.hadoop.hbase.client.Table;
085import org.apache.hadoop.hbase.client.TableDescriptor;
086import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
087import org.apache.hadoop.hbase.client.TableState;
088import org.apache.hadoop.hbase.fs.HFileSystem;
089import org.apache.hadoop.hbase.io.compress.Compression;
090import org.apache.hadoop.hbase.io.compress.Compression.Algorithm;
091import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
092import org.apache.hadoop.hbase.io.hfile.BlockCache;
093import org.apache.hadoop.hbase.io.hfile.ChecksumUtil;
094import org.apache.hadoop.hbase.io.hfile.HFile;
095import org.apache.hadoop.hbase.ipc.RpcServerInterface;
096import org.apache.hadoop.hbase.ipc.ServerNotRunningYetException;
097import org.apache.hadoop.hbase.logging.Log4jUtils;
098import org.apache.hadoop.hbase.mapreduce.MapreduceTestingShim;
099import org.apache.hadoop.hbase.master.HMaster;
100import org.apache.hadoop.hbase.master.RegionState;
101import org.apache.hadoop.hbase.master.ServerManager;
102import org.apache.hadoop.hbase.master.assignment.AssignmentManager;
103import org.apache.hadoop.hbase.master.assignment.AssignmentTestingUtil;
104import org.apache.hadoop.hbase.master.assignment.RegionStateStore;
105import org.apache.hadoop.hbase.master.assignment.RegionStates;
106import org.apache.hadoop.hbase.mob.MobFileCache;
107import org.apache.hadoop.hbase.regionserver.BloomType;
108import org.apache.hadoop.hbase.regionserver.ChunkCreator;
109import org.apache.hadoop.hbase.regionserver.HRegion;
110import org.apache.hadoop.hbase.regionserver.HRegionServer;
111import org.apache.hadoop.hbase.regionserver.HStore;
112import org.apache.hadoop.hbase.regionserver.InternalScanner;
113import org.apache.hadoop.hbase.regionserver.MemStoreLABImpl;
114import org.apache.hadoop.hbase.regionserver.Region;
115import org.apache.hadoop.hbase.regionserver.RegionScanner;
116import org.apache.hadoop.hbase.regionserver.RegionServerServices;
117import org.apache.hadoop.hbase.regionserver.RegionServerStoppedException;
118import org.apache.hadoop.hbase.security.HBaseKerberosUtils;
119import org.apache.hadoop.hbase.security.User;
120import org.apache.hadoop.hbase.security.visibility.VisibilityLabelsCache;
121import org.apache.hadoop.hbase.trace.TraceUtil;
122import org.apache.hadoop.hbase.util.Bytes;
123import org.apache.hadoop.hbase.util.CommonFSUtils;
124import org.apache.hadoop.hbase.util.FSTableDescriptors;
125import org.apache.hadoop.hbase.util.FSUtils;
126import org.apache.hadoop.hbase.util.JVMClusterUtil;
127import org.apache.hadoop.hbase.util.JVMClusterUtil.MasterThread;
128import org.apache.hadoop.hbase.util.JVMClusterUtil.RegionServerThread;
129import org.apache.hadoop.hbase.util.Pair;
130import org.apache.hadoop.hbase.util.RegionSplitter;
131import org.apache.hadoop.hbase.util.RegionSplitter.SplitAlgorithm;
132import org.apache.hadoop.hbase.util.RetryCounter;
133import org.apache.hadoop.hbase.util.Threads;
134import org.apache.hadoop.hbase.wal.WAL;
135import org.apache.hadoop.hbase.wal.WALFactory;
136import org.apache.hadoop.hbase.zookeeper.EmptyWatcher;
137import org.apache.hadoop.hbase.zookeeper.ZKConfig;
138import org.apache.hadoop.hbase.zookeeper.ZKWatcher;
139import org.apache.hadoop.hdfs.DFSClient;
140import org.apache.hadoop.hdfs.DistributedFileSystem;
141import org.apache.hadoop.hdfs.MiniDFSCluster;
142import org.apache.hadoop.hdfs.server.namenode.EditLogFileOutputStream;
143import org.apache.hadoop.mapred.JobConf;
144import org.apache.hadoop.mapred.MiniMRCluster;
145import org.apache.hadoop.mapred.TaskLog;
146import org.apache.hadoop.minikdc.MiniKdc;
147import org.apache.yetus.audience.InterfaceAudience;
148import org.apache.zookeeper.WatchedEvent;
149import org.apache.zookeeper.ZooKeeper;
150import org.apache.zookeeper.ZooKeeper.States;
151
152import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting;
153import org.apache.hbase.thirdparty.com.google.common.io.Closeables;
154import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
155
156/**
157 * Facility for testing HBase. Replacement for
158 * old HBaseTestCase and HBaseClusterTestCase functionality.
159 * Create an instance and keep it around testing HBase.  This class is
160 * meant to be your one-stop shop for anything you might need testing.  Manages
161 * one cluster at a time only. Managed cluster can be an in-process
162 * {@link MiniHBaseCluster}, or a deployed cluster of type {@code DistributedHBaseCluster}.
163 * Not all methods work with the real cluster.
164 * Depends on log4j being on classpath and
165 * hbase-site.xml for logging and test-run configuration.  It does not set
166 * logging levels.
167 * In the configuration properties, default values for master-info-port and
168 * region-server-port are overridden such that a random port will be assigned (thus
169 * avoiding port contention if another local HBase instance is already running).
170 * <p>To preserve test data directories, pass the system property "hbase.testing.preserve.testdir"
171 * setting it to true.
172 */
173@InterfaceAudience.Public
174@SuppressWarnings("deprecation")
175public class HBaseTestingUtility extends HBaseZKTestingUtility {
176
177  /**
178   * System property key to get test directory value. Name is as it is because mini dfs has
179   * hard-codings to put test data here. It should NOT be used directly in HBase, as it's a property
180   * used in mini dfs.
181   * @deprecated since 2.0.0 and will be removed in 3.0.0. Can be used only with mini dfs.
182   * @see <a href="https://issues.apache.org/jira/browse/HBASE-19410">HBASE-19410</a>
183   */
184  @Deprecated
185  private static final String TEST_DIRECTORY_KEY = "test.build.data";
186
187  public static final String REGIONS_PER_SERVER_KEY = "hbase.test.regions-per-server";
188  /**
189   * The default number of regions per regionserver when creating a pre-split
190   * table.
191   */
192  public static final int DEFAULT_REGIONS_PER_SERVER = 3;
193
194
195  public static final String PRESPLIT_TEST_TABLE_KEY = "hbase.test.pre-split-table";
196  public static final boolean PRESPLIT_TEST_TABLE = true;
197
198  private MiniDFSCluster dfsCluster = null;
199
200  private volatile HBaseCluster hbaseCluster = null;
201  private MiniMRCluster mrCluster = null;
202
203  /** If there is a mini cluster running for this testing utility instance. */
204  private volatile boolean miniClusterRunning;
205
206  private String hadoopLogDir;
207
208  /** Directory on test filesystem where we put the data for this instance of
209    * HBaseTestingUtility*/
210  private Path dataTestDirOnTestFS = null;
211
212  /**
213   * Shared cluster connection.
214   */
215  private volatile Connection connection;
216
217  /** Filesystem URI used for map-reduce mini-cluster setup */
218  private static String FS_URI;
219
220  /** This is for unit tests parameterized with a single boolean. */
221  public static final List<Object[]> MEMSTORETS_TAGS_PARAMETRIZED = memStoreTSAndTagsCombination();
222
223  /**
224   * Checks to see if a specific port is available.
225   *
226   * @param port the port number to check for availability
227   * @return <tt>true</tt> if the port is available, or <tt>false</tt> if not
228   */
229  public static boolean available(int port) {
230    ServerSocket ss = null;
231    DatagramSocket ds = null;
232    try {
233      ss = new ServerSocket(port);
234      ss.setReuseAddress(true);
235      ds = new DatagramSocket(port);
236      ds.setReuseAddress(true);
237      return true;
238    } catch (IOException e) {
239      // Do nothing
240    } finally {
241      if (ds != null) {
242        ds.close();
243      }
244
245      if (ss != null) {
246        try {
247          ss.close();
248        } catch (IOException e) {
249          /* should not be thrown */
250        }
251      }
252    }
253
254    return false;
255  }
256
257  /**
258   * Create all combinations of Bloom filters and compression algorithms for
259   * testing.
260   */
261  private static List<Object[]> bloomAndCompressionCombinations() {
262    List<Object[]> configurations = new ArrayList<>();
263    for (Compression.Algorithm comprAlgo :
264         HBaseCommonTestingUtility.COMPRESSION_ALGORITHMS) {
265      for (BloomType bloomType : BloomType.values()) {
266        configurations.add(new Object[] { comprAlgo, bloomType });
267      }
268    }
269    return Collections.unmodifiableList(configurations);
270  }
271
272  /**
273   * Create combination of memstoreTS and tags
274   */
275  private static List<Object[]> memStoreTSAndTagsCombination() {
276    List<Object[]> configurations = new ArrayList<>();
277    configurations.add(new Object[] { false, false });
278    configurations.add(new Object[] { false, true });
279    configurations.add(new Object[] { true, false });
280    configurations.add(new Object[] { true, true });
281    return Collections.unmodifiableList(configurations);
282  }
283
284  public static List<Object[]> memStoreTSTagsAndOffheapCombination() {
285    List<Object[]> configurations = new ArrayList<>();
286    configurations.add(new Object[] { false, false, true });
287    configurations.add(new Object[] { false, false, false });
288    configurations.add(new Object[] { false, true, true });
289    configurations.add(new Object[] { false, true, false });
290    configurations.add(new Object[] { true, false, true });
291    configurations.add(new Object[] { true, false, false });
292    configurations.add(new Object[] { true, true, true });
293    configurations.add(new Object[] { true, true, false });
294    return Collections.unmodifiableList(configurations);
295  }
296
297  public static final Collection<Object[]> BLOOM_AND_COMPRESSION_COMBINATIONS =
298      bloomAndCompressionCombinations();
299
300
301  /**
302   * <p>Create an HBaseTestingUtility using a default configuration.
303   *
304   * <p>Initially, all tmp files are written to a local test data directory.
305   * Once {@link #startMiniDFSCluster} is called, either directly or via
306   * {@link #startMiniCluster()}, tmp data will be written to the DFS directory instead.
307   *
308   * <p>Previously, there was a distinction between the type of utility returned by
309   * {@link #createLocalHTU()} and this constructor; this is no longer the case. All
310   * HBaseTestingUtility objects will behave as local until a DFS cluster is started,
311   * at which point they will switch to using mini DFS for storage.
312   */
313  public HBaseTestingUtility() {
314    this(HBaseConfiguration.create());
315  }
316
317  /**
318   * <p>Create an HBaseTestingUtility using a given configuration.
319   *
320   * <p>Initially, all tmp files are written to a local test data directory.
321   * Once {@link #startMiniDFSCluster} is called, either directly or via
322   * {@link #startMiniCluster()}, tmp data will be written to the DFS directory instead.
323   *
324   * <p>Previously, there was a distinction between the type of utility returned by
325   * {@link #createLocalHTU()} and this constructor; this is no longer the case. All
326   * HBaseTestingUtility objects will behave as local until a DFS cluster is started,
327   * at which point they will switch to using mini DFS for storage.
328   *
329   * @param conf The configuration to use for further operations
330   */
331  public HBaseTestingUtility(@Nullable Configuration conf) {
332    super(conf);
333
334    // a hbase checksum verification failure will cause unit tests to fail
335    ChecksumUtil.generateExceptionForChecksumFailureForTest(true);
336
337    // Save this for when setting default file:// breaks things
338    if (this.conf.get("fs.defaultFS") != null) {
339      this.conf.set("original.defaultFS", this.conf.get("fs.defaultFS"));
340    }
341    if (this.conf.get(HConstants.HBASE_DIR) != null) {
342      this.conf.set("original.hbase.dir", this.conf.get(HConstants.HBASE_DIR));
343    }
344    // Every cluster is a local cluster until we start DFS
345    // Note that conf could be null, but this.conf will not be
346    String dataTestDir = getDataTestDir().toString();
347    this.conf.set("fs.defaultFS","file:///");
348    this.conf.set(HConstants.HBASE_DIR, "file://" + dataTestDir);
349    LOG.debug("Setting {} to {}", HConstants.HBASE_DIR, dataTestDir);
350    this.conf.setBoolean(CommonFSUtils.UNSAFE_STREAM_CAPABILITY_ENFORCE,false);
351    // If the value for random ports isn't set set it to true, thus making
352    // tests opt-out for random port assignment
353    this.conf.setBoolean(LocalHBaseCluster.ASSIGN_RANDOM_PORTS,
354        this.conf.getBoolean(LocalHBaseCluster.ASSIGN_RANDOM_PORTS, true));
355  }
356
357  /**
358   * @deprecated since 2.0.0 and will be removed in 3.0.0. Use {@link #HBaseTestingUtility()}
359   *   instead.
360   * @return a normal HBaseTestingUtility
361   * @see #HBaseTestingUtility()
362   * @see <a href="https://issues.apache.org/jira/browse/HBASE-19841">HBASE-19841</a>
363   */
364  @Deprecated
365  public static HBaseTestingUtility createLocalHTU() {
366    return new HBaseTestingUtility();
367  }
368
369  /**
370   * @deprecated since 2.0.0 and will be removed in 3.0.0. Use
371   *   {@link #HBaseTestingUtility(Configuration)} instead.
372   * @return a normal HBaseTestingUtility
373   * @see #HBaseTestingUtility(Configuration)
374   * @see <a href="https://issues.apache.org/jira/browse/HBASE-19841">HBASE-19841</a>
375   */
376  @Deprecated
377  public static HBaseTestingUtility createLocalHTU(Configuration c) {
378    return new HBaseTestingUtility(c);
379  }
380
381  /**
382   * Close both the region {@code r} and it's underlying WAL. For use in tests.
383   */
384  public static void closeRegionAndWAL(final Region r) throws IOException {
385    closeRegionAndWAL((HRegion)r);
386  }
387
388  /**
389   * Close both the HRegion {@code r} and it's underlying WAL. For use in tests.
390   */
391  public static void closeRegionAndWAL(final HRegion r) throws IOException {
392    if (r == null) return;
393    r.close();
394    if (r.getWAL() == null) return;
395    r.getWAL().close();
396  }
397
398  /**
399   * Returns this classes's instance of {@link Configuration}.  Be careful how
400   * you use the returned Configuration since {@link Connection} instances
401   * can be shared.  The Map of Connections is keyed by the Configuration.  If
402   * say, a Connection was being used against a cluster that had been shutdown,
403   * see {@link #shutdownMiniCluster()}, then the Connection will no longer
404   * be wholesome.  Rather than use the return direct, its usually best to
405   * make a copy and use that.  Do
406   * <code>Configuration c = new Configuration(INSTANCE.getConfiguration());</code>
407   * @return Instance of Configuration.
408   */
409  @Override
410  public Configuration getConfiguration() {
411    return super.getConfiguration();
412  }
413
414  public void setHBaseCluster(HBaseCluster hbaseCluster) {
415    this.hbaseCluster = hbaseCluster;
416  }
417
418  /**
419   * Home our data in a dir under {@link #DEFAULT_BASE_TEST_DIRECTORY}.
420   * Give it a random name so can have many concurrent tests running if
421   * we need to.  It needs to amend the {@link #TEST_DIRECTORY_KEY}
422   * System property, as it's what minidfscluster bases
423   * it data dir on.  Moding a System property is not the way to do concurrent
424   * instances -- another instance could grab the temporary
425   * value unintentionally -- but not anything can do about it at moment;
426   * single instance only is how the minidfscluster works.
427   *
428   * We also create the underlying directory names for
429   *  hadoop.log.dir, mapreduce.cluster.local.dir and hadoop.tmp.dir, and set the values
430   *  in the conf, and as a system property for hadoop.tmp.dir (We do not create them!).
431   *
432   * @return The calculated data test build directory, if newly-created.
433   */
434  @Override
435  protected Path setupDataTestDir() {
436    Path testPath = super.setupDataTestDir();
437    if (null == testPath) {
438      return null;
439    }
440
441    createSubDirAndSystemProperty(
442      "hadoop.log.dir",
443      testPath, "hadoop-log-dir");
444
445    // This is defaulted in core-default.xml to /tmp/hadoop-${user.name}, but
446    //  we want our own value to ensure uniqueness on the same machine
447    createSubDirAndSystemProperty(
448      "hadoop.tmp.dir",
449      testPath, "hadoop-tmp-dir");
450
451    // Read and modified in org.apache.hadoop.mapred.MiniMRCluster
452    createSubDir(
453      "mapreduce.cluster.local.dir",
454      testPath, "mapred-local-dir");
455    return testPath;
456  }
457
458  private void createSubDirAndSystemProperty(
459    String propertyName, Path parent, String subDirName){
460
461    String sysValue = System.getProperty(propertyName);
462
463    if (sysValue != null) {
464      // There is already a value set. So we do nothing but hope
465      //  that there will be no conflicts
466      LOG.info("System.getProperty(\""+propertyName+"\") already set to: "+
467        sysValue + " so I do NOT create it in " + parent);
468      String confValue = conf.get(propertyName);
469      if (confValue != null && !confValue.endsWith(sysValue)){
470       LOG.warn(
471         propertyName + " property value differs in configuration and system: "+
472         "Configuration="+confValue+" while System="+sysValue+
473         " Erasing configuration value by system value."
474       );
475      }
476      conf.set(propertyName, sysValue);
477    } else {
478      // Ok, it's not set, so we create it as a subdirectory
479      createSubDir(propertyName, parent, subDirName);
480      System.setProperty(propertyName, conf.get(propertyName));
481    }
482  }
483
484  /**
485   * @return Where to write test data on the test filesystem; Returns working directory
486   * for the test filesystem by default
487   * @see #setupDataTestDirOnTestFS()
488   * @see #getTestFileSystem()
489   */
490  private Path getBaseTestDirOnTestFS() throws IOException {
491    FileSystem fs = getTestFileSystem();
492    return new Path(fs.getWorkingDirectory(), "test-data");
493  }
494
495  /**
496   * @return META table descriptor
497   * @deprecated since 2.0 version and will be removed in 3.0 version. Currently for test only.
498   *             use {@link #getMetaTableDescriptorBuilder()}
499   */
500  @Deprecated
501  public HTableDescriptor getMetaTableDescriptor() {
502    return new ImmutableHTableDescriptor(getMetaTableDescriptorBuilder().build());
503  }
504
505  /**
506   * @return META table descriptor
507   * @deprecated Since 2.3.0. No one should be using this internal. Used in testing only.
508   */
509  @Deprecated
510  @VisibleForTesting
511  public TableDescriptorBuilder getMetaTableDescriptorBuilder() {
512    try {
513      return FSTableDescriptors.createMetaTableDescriptorBuilder(conf);
514    } catch (IOException e) {
515      throw new RuntimeException("Unable to create META table descriptor", e);
516    }
517  }
518
519  /**
520   * Returns a Path in the test filesystem, obtained from {@link #getTestFileSystem()}
521   * to write temporary test data. Call this method after setting up the mini dfs cluster
522   * if the test relies on it.
523   * @return a unique path in the test filesystem
524   */
525  public Path getDataTestDirOnTestFS() throws IOException {
526    if (dataTestDirOnTestFS == null) {
527      setupDataTestDirOnTestFS();
528    }
529
530    return dataTestDirOnTestFS;
531  }
532
533  /**
534   * Returns a Path in the test filesystem, obtained from {@link #getTestFileSystem()}
535   * to write temporary test data. Call this method after setting up the mini dfs cluster
536   * if the test relies on it.
537   * @return a unique path in the test filesystem
538   * @param subdirName name of the subdir to create under the base test dir
539   */
540  public Path getDataTestDirOnTestFS(final String subdirName) throws IOException {
541    return new Path(getDataTestDirOnTestFS(), subdirName);
542  }
543
544  /**
545   * Sets up a path in test filesystem to be used by tests.
546   * Creates a new directory if not already setup.
547   */
548  private void setupDataTestDirOnTestFS() throws IOException {
549    if (dataTestDirOnTestFS != null) {
550      LOG.warn("Data test on test fs dir already setup in "
551          + dataTestDirOnTestFS.toString());
552      return;
553    }
554    dataTestDirOnTestFS = getNewDataTestDirOnTestFS();
555  }
556
557  /**
558   * Sets up a new path in test filesystem to be used by tests.
559   */
560  private Path getNewDataTestDirOnTestFS() throws IOException {
561    //The file system can be either local, mini dfs, or if the configuration
562    //is supplied externally, it can be an external cluster FS. If it is a local
563    //file system, the tests should use getBaseTestDir, otherwise, we can use
564    //the working directory, and create a unique sub dir there
565    FileSystem fs = getTestFileSystem();
566    Path newDataTestDir;
567    String randomStr = getRandomUUID().toString();
568    if (fs.getUri().getScheme().equals(FileSystem.getLocal(conf).getUri().getScheme())) {
569      newDataTestDir = new Path(getDataTestDir(), randomStr);
570      File dataTestDir = new File(newDataTestDir.toString());
571      if (deleteOnExit()) dataTestDir.deleteOnExit();
572    } else {
573      Path base = getBaseTestDirOnTestFS();
574      newDataTestDir = new Path(base, randomStr);
575      if (deleteOnExit()) fs.deleteOnExit(newDataTestDir);
576    }
577    return newDataTestDir;
578  }
579
580  /**
581   * Cleans the test data directory on the test filesystem.
582   * @return True if we removed the test dirs
583   * @throws IOException
584   */
585  public boolean cleanupDataTestDirOnTestFS() throws IOException {
586    boolean ret = getTestFileSystem().delete(dataTestDirOnTestFS, true);
587    if (ret)
588      dataTestDirOnTestFS = null;
589    return ret;
590  }
591
592  /**
593   * Cleans a subdirectory under the test data directory on the test filesystem.
594   * @return True if we removed child
595   * @throws IOException
596   */
597  public boolean cleanupDataTestDirOnTestFS(String subdirName) throws IOException {
598    Path cpath = getDataTestDirOnTestFS(subdirName);
599    return getTestFileSystem().delete(cpath, true);
600  }
601
602  /**
603   * Start a minidfscluster.
604   * @param servers How many DNs to start.
605   * @throws Exception
606   * @see #shutdownMiniDFSCluster()
607   * @return The mini dfs cluster created.
608   */
609  public MiniDFSCluster startMiniDFSCluster(int servers) throws Exception {
610    return startMiniDFSCluster(servers, null);
611  }
612
613  /**
614   * Start a minidfscluster.
615   * This is useful if you want to run datanode on distinct hosts for things
616   * like HDFS block location verification.
617   * If you start MiniDFSCluster without host names, all instances of the
618   * datanodes will have the same host name.
619   * @param hosts hostnames DNs to run on.
620   * @throws Exception
621   * @see #shutdownMiniDFSCluster()
622   * @return The mini dfs cluster created.
623   */
624  public MiniDFSCluster startMiniDFSCluster(final String hosts[])
625  throws Exception {
626    if ( hosts != null && hosts.length != 0) {
627      return startMiniDFSCluster(hosts.length, hosts);
628    } else {
629      return startMiniDFSCluster(1, null);
630    }
631  }
632
633  /**
634   * Start a minidfscluster.
635   * Can only create one.
636   * @param servers How many DNs to start.
637   * @param hosts hostnames DNs to run on.
638   * @throws Exception
639   * @see #shutdownMiniDFSCluster()
640   * @return The mini dfs cluster created.
641   */
642  public MiniDFSCluster startMiniDFSCluster(int servers, final String hosts[])
643  throws Exception {
644    return startMiniDFSCluster(servers, null, hosts);
645  }
646
647  private void setFs() throws IOException {
648    if(this.dfsCluster == null){
649      LOG.info("Skipping setting fs because dfsCluster is null");
650      return;
651    }
652    FileSystem fs = this.dfsCluster.getFileSystem();
653    CommonFSUtils.setFsDefault(this.conf, new Path(fs.getUri()));
654
655    // re-enable this check with dfs
656    conf.unset(CommonFSUtils.UNSAFE_STREAM_CAPABILITY_ENFORCE);
657  }
658
659  public MiniDFSCluster startMiniDFSCluster(int servers, final  String racks[], String hosts[])
660      throws Exception {
661    createDirsAndSetProperties();
662    EditLogFileOutputStream.setShouldSkipFsyncForTesting(true);
663
664    // Error level to skip some warnings specific to the minicluster. See HBASE-4709
665    Log4jUtils.setLogLevel(org.apache.hadoop.metrics2.util.MBeans.class.getName(), "ERROR");
666    Log4jUtils.setLogLevel(org.apache.hadoop.metrics2.impl.MetricsSystemImpl.class.getName(),
667      "ERROR");
668
669    TraceUtil.initTracer(conf);
670
671    this.dfsCluster = new MiniDFSCluster(0, this.conf, servers, true, true,
672        true, null, racks, hosts, null);
673
674    // Set this just-started cluster as our filesystem.
675    setFs();
676
677    // Wait for the cluster to be totally up
678    this.dfsCluster.waitClusterUp();
679
680    //reset the test directory for test file system
681    dataTestDirOnTestFS = null;
682    String dataTestDir = getDataTestDir().toString();
683    conf.set(HConstants.HBASE_DIR, dataTestDir);
684    LOG.debug("Setting {} to {}", HConstants.HBASE_DIR, dataTestDir);
685
686    return this.dfsCluster;
687  }
688
689  public MiniDFSCluster startMiniDFSClusterForTestWAL(int namenodePort) throws IOException {
690    createDirsAndSetProperties();
691    // Error level to skip some warnings specific to the minicluster. See HBASE-4709
692    Log4jUtils.setLogLevel(org.apache.hadoop.metrics2.util.MBeans.class.getName(), "ERROR");
693    Log4jUtils.setLogLevel(org.apache.hadoop.metrics2.impl.MetricsSystemImpl.class.getName(),
694      "ERROR");
695    dfsCluster = new MiniDFSCluster(namenodePort, conf, 5, false, true, true, null,
696        null, null, null);
697    return dfsCluster;
698  }
699
700  /**
701   * This is used before starting HDFS and map-reduce mini-clusters Run something like the below to
702   * check for the likes of '/tmp' references -- i.e. references outside of the test data dir -- in
703   * the conf.
704   *
705   * <pre>
706   * Configuration conf = TEST_UTIL.getConfiguration();
707   * for (Iterator&lt;Map.Entry&lt;String, String&gt;&gt; i = conf.iterator(); i.hasNext();) {
708   *   Map.Entry&lt;String, String&gt; e = i.next();
709   *   assertFalse(e.getKey() + " " + e.getValue(), e.getValue().contains("/tmp"));
710   * }
711   * </pre>
712   */
713  private void createDirsAndSetProperties() throws IOException {
714    setupClusterTestDir();
715    conf.set(TEST_DIRECTORY_KEY, clusterTestDir.getPath());
716    System.setProperty(TEST_DIRECTORY_KEY, clusterTestDir.getPath());
717    createDirAndSetProperty("test.cache.data");
718    createDirAndSetProperty("hadoop.tmp.dir");
719    hadoopLogDir = createDirAndSetProperty("hadoop.log.dir");
720    createDirAndSetProperty("mapreduce.cluster.local.dir");
721    createDirAndSetProperty("mapreduce.cluster.temp.dir");
722    enableShortCircuit();
723
724    Path root = getDataTestDirOnTestFS("hadoop");
725    conf.set(MapreduceTestingShim.getMROutputDirProp(),
726      new Path(root, "mapred-output-dir").toString());
727    conf.set("mapreduce.jobtracker.system.dir", new Path(root, "mapred-system-dir").toString());
728    conf.set("mapreduce.jobtracker.staging.root.dir",
729      new Path(root, "mapreduce-jobtracker-staging-root-dir").toString());
730    conf.set("mapreduce.job.working.dir", new Path(root, "mapred-working-dir").toString());
731    conf.set("yarn.app.mapreduce.am.staging-dir",
732      new Path(root, "mapreduce-am-staging-root-dir").toString());
733
734    // Frustrate yarn's and hdfs's attempts at writing /tmp.
735    // Below is fragile. Make it so we just interpolate any 'tmp' reference.
736    createDirAndSetProperty("yarn.node-labels.fs-store.root-dir");
737    createDirAndSetProperty("yarn.node-attribute.fs-store.root-dir");
738    createDirAndSetProperty("yarn.nodemanager.log-dirs");
739    createDirAndSetProperty("yarn.nodemanager.remote-app-log-dir");
740    createDirAndSetProperty("yarn.timeline-service.entity-group-fs-store.active-dir");
741    createDirAndSetProperty("yarn.timeline-service.entity-group-fs-store.done-dir");
742    createDirAndSetProperty("yarn.nodemanager.remote-app-log-dir");
743    createDirAndSetProperty("dfs.journalnode.edits.dir");
744    createDirAndSetProperty("dfs.datanode.shared.file.descriptor.paths");
745    createDirAndSetProperty("nfs.dump.dir");
746    createDirAndSetProperty("java.io.tmpdir");
747    createDirAndSetProperty("dfs.journalnode.edits.dir");
748    createDirAndSetProperty("dfs.provided.aliasmap.inmemory.leveldb.dir");
749    createDirAndSetProperty("fs.s3a.committer.staging.tmp.path");
750  }
751
752  /**
753   *  Check whether the tests should assume NEW_VERSION_BEHAVIOR when creating
754   *  new column families. Default to false.
755   */
756  public boolean isNewVersionBehaviorEnabled(){
757    final String propName = "hbase.tests.new.version.behavior";
758    String v = System.getProperty(propName);
759    if (v != null){
760      return Boolean.parseBoolean(v);
761    }
762    return false;
763  }
764
765  /**
766   *  Get the HBase setting for dfs.client.read.shortcircuit from the conf or a system property.
767   *  This allows to specify this parameter on the command line.
768   *   If not set, default is true.
769   */
770  public boolean isReadShortCircuitOn(){
771    final String propName = "hbase.tests.use.shortcircuit.reads";
772    String readOnProp = System.getProperty(propName);
773    if (readOnProp != null){
774      return  Boolean.parseBoolean(readOnProp);
775    } else {
776      return conf.getBoolean(propName, false);
777    }
778  }
779
780  /** Enable the short circuit read, unless configured differently.
781   * Set both HBase and HDFS settings, including skipping the hdfs checksum checks.
782   */
783  private void enableShortCircuit() {
784    if (isReadShortCircuitOn()) {
785      String curUser = System.getProperty("user.name");
786      LOG.info("read short circuit is ON for user " + curUser);
787      // read short circuit, for hdfs
788      conf.set("dfs.block.local-path-access.user", curUser);
789      // read short circuit, for hbase
790      conf.setBoolean("dfs.client.read.shortcircuit", true);
791      // Skip checking checksum, for the hdfs client and the datanode
792      conf.setBoolean("dfs.client.read.shortcircuit.skip.checksum", true);
793    } else {
794      LOG.info("read short circuit is OFF");
795    }
796  }
797
798  private String createDirAndSetProperty(String property) {
799    return createDirAndSetProperty(property, property);
800  }
801
802  private String createDirAndSetProperty(final String relPath, String property) {
803    String path = getDataTestDir(relPath).toString();
804    System.setProperty(property, path);
805    conf.set(property, path);
806    new File(path).mkdirs();
807    LOG.info("Setting " + property + " to " + path + " in system properties and HBase conf");
808    return path;
809  }
810
811  /**
812   * Shuts down instance created by call to {@link #startMiniDFSCluster(int)}
813   * or does nothing.
814   * @throws IOException
815   */
816  public void shutdownMiniDFSCluster() throws IOException {
817    if (this.dfsCluster != null) {
818      // The below throws an exception per dn, AsynchronousCloseException.
819      this.dfsCluster.shutdown();
820      dfsCluster = null;
821      dataTestDirOnTestFS = null;
822      CommonFSUtils.setFsDefault(this.conf, new Path("file:///"));
823    }
824  }
825
826  /**
827   * Start up a minicluster of hbase, dfs, and zookeeper where WAL's walDir is created separately.
828   * All other options will use default values, defined in {@link StartMiniClusterOption.Builder}.
829   * @param createWALDir Whether to create a new WAL directory.
830   * @return The mini HBase cluster created.
831   * @see #shutdownMiniCluster()
832   * @deprecated since 2.2.0 and will be removed in 4.0.0. Use
833   *   {@link #startMiniCluster(StartMiniClusterOption)} instead.
834   * @see #startMiniCluster(StartMiniClusterOption)
835   * @see <a href="https://issues.apache.org/jira/browse/HBASE-21071">HBASE-21071</a>
836   */
837  @Deprecated
838  public MiniHBaseCluster startMiniCluster(boolean createWALDir) throws Exception {
839    StartMiniClusterOption option = StartMiniClusterOption.builder()
840        .createWALDir(createWALDir).build();
841    return startMiniCluster(option);
842  }
843
844  /**
845   * Start up a minicluster of hbase, dfs, and zookeeper.
846   * All other options will use default values, defined in {@link StartMiniClusterOption.Builder}.
847   * @param numSlaves Slave node number, for both HBase region server and HDFS data node.
848   * @param createRootDir Whether to create a new root or data directory path.
849   * @return The mini HBase cluster created.
850   * @see #shutdownMiniCluster()
851   * @deprecated since 2.2.0 and will be removed in 4.0.0. Use
852   *   {@link #startMiniCluster(StartMiniClusterOption)} instead.
853   * @see #startMiniCluster(StartMiniClusterOption)
854   * @see <a href="https://issues.apache.org/jira/browse/HBASE-21071">HBASE-21071</a>
855   */
856  @Deprecated
857  public MiniHBaseCluster startMiniCluster(int numSlaves, boolean createRootDir)
858  throws Exception {
859    StartMiniClusterOption option = StartMiniClusterOption.builder()
860        .numRegionServers(numSlaves).numDataNodes(numSlaves).createRootDir(createRootDir).build();
861    return startMiniCluster(option);
862  }
863
864  /**
865   * Start up a minicluster of hbase, dfs, and zookeeper.
866   * All other options will use default values, defined in {@link StartMiniClusterOption.Builder}.
867   * @param numSlaves Slave node number, for both HBase region server and HDFS data node.
868   * @param createRootDir Whether to create a new root or data directory path.
869   * @param createWALDir Whether to create a new WAL directory.
870   * @return The mini HBase cluster created.
871   * @see #shutdownMiniCluster()
872   * @deprecated since 2.2.0 and will be removed in 4.0.0. Use
873   *   {@link #startMiniCluster(StartMiniClusterOption)} instead.
874   * @see #startMiniCluster(StartMiniClusterOption)
875   * @see <a href="https://issues.apache.org/jira/browse/HBASE-21071">HBASE-21071</a>
876   */
877  @Deprecated
878  public MiniHBaseCluster startMiniCluster(int numSlaves, boolean createRootDir,
879      boolean createWALDir) throws Exception {
880    StartMiniClusterOption option = StartMiniClusterOption.builder()
881        .numRegionServers(numSlaves).numDataNodes(numSlaves).createRootDir(createRootDir)
882        .createWALDir(createWALDir).build();
883    return startMiniCluster(option);
884  }
885
886  /**
887   * Start up a minicluster of hbase, dfs, and zookeeper.
888   * All other options will use default values, defined in {@link StartMiniClusterOption.Builder}.
889   * @param numMasters Master node number.
890   * @param numSlaves Slave node number, for both HBase region server and HDFS data node.
891   * @param createRootDir Whether to create a new root or data directory path.
892   * @return The mini HBase cluster created.
893   * @see #shutdownMiniCluster()
894   * @deprecated since 2.2.0 and will be removed in 4.0.0. Use
895   *  {@link #startMiniCluster(StartMiniClusterOption)} instead.
896   * @see #startMiniCluster(StartMiniClusterOption)
897   * @see <a href="https://issues.apache.org/jira/browse/HBASE-21071">HBASE-21071</a>
898   */
899  @Deprecated
900  public MiniHBaseCluster startMiniCluster(int numMasters, int numSlaves, boolean createRootDir)
901    throws Exception {
902    StartMiniClusterOption option = StartMiniClusterOption.builder()
903        .numMasters(numMasters).numRegionServers(numSlaves).createRootDir(createRootDir)
904        .numDataNodes(numSlaves).build();
905    return startMiniCluster(option);
906  }
907
908  /**
909   * Start up a minicluster of hbase, dfs, and zookeeper.
910   * All other options will use default values, defined in {@link StartMiniClusterOption.Builder}.
911   * @param numMasters Master node number.
912   * @param numSlaves Slave node number, for both HBase region server and HDFS data node.
913   * @return The mini HBase cluster created.
914   * @see #shutdownMiniCluster()
915   * @deprecated since 2.2.0 and will be removed in 4.0.0. Use
916   *   {@link #startMiniCluster(StartMiniClusterOption)} instead.
917   * @see #startMiniCluster(StartMiniClusterOption)
918   * @see <a href="https://issues.apache.org/jira/browse/HBASE-21071">HBASE-21071</a>
919   */
920  @Deprecated
921  public MiniHBaseCluster startMiniCluster(int numMasters, int numSlaves) throws Exception {
922    StartMiniClusterOption option = StartMiniClusterOption.builder()
923        .numMasters(numMasters).numRegionServers(numSlaves).numDataNodes(numSlaves).build();
924    return startMiniCluster(option);
925  }
926
927  /**
928   * Start up a minicluster of hbase, dfs, and zookeeper.
929   * All other options will use default values, defined in {@link StartMiniClusterOption.Builder}.
930   * @param numMasters Master node number.
931   * @param numSlaves Slave node number, for both HBase region server and HDFS data node.
932   * @param dataNodeHosts The hostnames of DataNodes to run on. If not null, its size will overwrite
933   *                      HDFS data node number.
934   * @param createRootDir Whether to create a new root or data directory path.
935   * @return The mini HBase cluster created.
936   * @see #shutdownMiniCluster()
937   * @deprecated since 2.2.0 and will be removed in 4.0.0. Use
938   *   {@link #startMiniCluster(StartMiniClusterOption)} instead.
939   * @see #startMiniCluster(StartMiniClusterOption)
940   * @see <a href="https://issues.apache.org/jira/browse/HBASE-21071">HBASE-21071</a>
941   */
942  @Deprecated
943  public MiniHBaseCluster startMiniCluster(int numMasters, int numSlaves, String[] dataNodeHosts,
944      boolean createRootDir) throws Exception {
945    StartMiniClusterOption option = StartMiniClusterOption.builder()
946        .numMasters(numMasters).numRegionServers(numSlaves).createRootDir(createRootDir)
947        .numDataNodes(numSlaves).dataNodeHosts(dataNodeHosts).build();
948    return startMiniCluster(option);
949  }
950
951  /**
952   * Start up a minicluster of hbase, dfs, and zookeeper.
953   * All other options will use default values, defined in {@link StartMiniClusterOption.Builder}.
954   * @param numMasters Master node number.
955   * @param numSlaves Slave node number, for both HBase region server and HDFS data node.
956   * @param dataNodeHosts The hostnames of DataNodes to run on. If not null, its size will overwrite
957   *                      HDFS data node number.
958   * @return The mini HBase cluster created.
959   * @see #shutdownMiniCluster()
960   * @deprecated since 2.2.0 and will be removed in 4.0.0. Use
961   *   {@link #startMiniCluster(StartMiniClusterOption)} instead.
962   * @see #startMiniCluster(StartMiniClusterOption)
963   * @see <a href="https://issues.apache.org/jira/browse/HBASE-21071">HBASE-21071</a>
964   */
965  @Deprecated
966  public MiniHBaseCluster startMiniCluster(int numMasters, int numSlaves, String[] dataNodeHosts)
967      throws Exception {
968    StartMiniClusterOption option = StartMiniClusterOption.builder()
969        .numMasters(numMasters).numRegionServers(numSlaves)
970        .numDataNodes(numSlaves).dataNodeHosts(dataNodeHosts).build();
971    return startMiniCluster(option);
972  }
973
974  /**
975   * Start up a minicluster of hbase, dfs, and zookeeper.
976   * All other options will use default values, defined in {@link StartMiniClusterOption.Builder}.
977   * @param numMasters Master node number.
978   * @param numRegionServers Number of region servers.
979   * @param numDataNodes Number of datanodes.
980   * @return The mini HBase cluster created.
981   * @see #shutdownMiniCluster()
982   * @deprecated since 2.2.0 and will be removed in 4.0.0. Use
983   *   {@link #startMiniCluster(StartMiniClusterOption)} instead.
984   * @see #startMiniCluster(StartMiniClusterOption)
985   * @see <a href="https://issues.apache.org/jira/browse/HBASE-21071">HBASE-21071</a>
986   */
987  @Deprecated
988  public MiniHBaseCluster startMiniCluster(int numMasters, int numRegionServers, int numDataNodes)
989      throws Exception {
990    StartMiniClusterOption option = StartMiniClusterOption.builder()
991        .numMasters(numMasters).numRegionServers(numRegionServers).numDataNodes(numDataNodes)
992        .build();
993    return startMiniCluster(option);
994  }
995
996  /**
997   * Start up a minicluster of hbase, dfs, and zookeeper.
998   * All other options will use default values, defined in {@link StartMiniClusterOption.Builder}.
999   * @param numMasters Master node number.
1000   * @param numSlaves Slave node number, for both HBase region server and HDFS data node.
1001   * @param dataNodeHosts The hostnames of DataNodes to run on. If not null, its size will overwrite
1002   *                      HDFS data node number.
1003   * @param masterClass The class to use as HMaster, or null for default.
1004   * @param rsClass The class to use as HRegionServer, or null for default.
1005   * @return The mini HBase cluster created.
1006   * @see #shutdownMiniCluster()
1007   * @deprecated since 2.2.0 and will be removed in 4.0.0. Use
1008   *   {@link #startMiniCluster(StartMiniClusterOption)} instead.
1009   * @see #startMiniCluster(StartMiniClusterOption)
1010   * @see <a href="https://issues.apache.org/jira/browse/HBASE-21071">HBASE-21071</a>
1011   */
1012  @Deprecated
1013  public MiniHBaseCluster startMiniCluster(int numMasters, int numSlaves, String[] dataNodeHosts,
1014      Class<? extends HMaster> masterClass,
1015      Class<? extends MiniHBaseCluster.MiniHBaseClusterRegionServer> rsClass)
1016      throws Exception {
1017    StartMiniClusterOption option = StartMiniClusterOption.builder()
1018        .numMasters(numMasters).masterClass(masterClass)
1019        .numRegionServers(numSlaves).rsClass(rsClass)
1020        .numDataNodes(numSlaves).dataNodeHosts(dataNodeHosts)
1021        .build();
1022    return startMiniCluster(option);
1023  }
1024
1025  /**
1026   * Start up a minicluster of hbase, dfs, and zookeeper.
1027   * All other options will use default values, defined in {@link StartMiniClusterOption.Builder}.
1028   * @param numMasters Master node number.
1029   * @param numRegionServers Number of region servers.
1030   * @param numDataNodes Number of datanodes.
1031   * @param dataNodeHosts The hostnames of DataNodes to run on. If not null, its size will overwrite
1032   *                      HDFS data node number.
1033   * @param masterClass The class to use as HMaster, or null for default.
1034   * @param rsClass The class to use as HRegionServer, or null for default.
1035   * @return The mini HBase cluster created.
1036   * @see #shutdownMiniCluster()
1037   * @deprecated since 2.2.0 and will be removed in 4.0.0. Use
1038   *   {@link #startMiniCluster(StartMiniClusterOption)} instead.
1039   * @see #startMiniCluster(StartMiniClusterOption)
1040   * @see <a href="https://issues.apache.org/jira/browse/HBASE-21071">HBASE-21071</a>
1041   */
1042  @Deprecated
1043  public MiniHBaseCluster startMiniCluster(int numMasters, int numRegionServers, int numDataNodes,
1044      String[] dataNodeHosts, Class<? extends HMaster> masterClass,
1045      Class<? extends MiniHBaseCluster.MiniHBaseClusterRegionServer> rsClass)
1046    throws Exception {
1047    StartMiniClusterOption option = StartMiniClusterOption.builder()
1048        .numMasters(numMasters).masterClass(masterClass)
1049        .numRegionServers(numRegionServers).rsClass(rsClass)
1050        .numDataNodes(numDataNodes).dataNodeHosts(dataNodeHosts)
1051        .build();
1052    return startMiniCluster(option);
1053  }
1054
1055  /**
1056   * Start up a minicluster of hbase, dfs, and zookeeper.
1057   * All other options will use default values, defined in {@link StartMiniClusterOption.Builder}.
1058   * @param numMasters Master node number.
1059   * @param numRegionServers Number of region servers.
1060   * @param numDataNodes Number of datanodes.
1061   * @param dataNodeHosts The hostnames of DataNodes to run on. If not null, its size will overwrite
1062   *                      HDFS data node number.
1063   * @param masterClass The class to use as HMaster, or null for default.
1064   * @param rsClass The class to use as HRegionServer, or null for default.
1065   * @param createRootDir Whether to create a new root or data directory path.
1066   * @param createWALDir Whether to create a new WAL directory.
1067   * @return The mini HBase cluster created.
1068   * @see #shutdownMiniCluster()
1069   * @deprecated since 2.2.0 and will be removed in 4.0.0. Use
1070   *   {@link #startMiniCluster(StartMiniClusterOption)} instead.
1071   * @see #startMiniCluster(StartMiniClusterOption)
1072   * @see <a href="https://issues.apache.org/jira/browse/HBASE-21071">HBASE-21071</a>
1073   */
1074  @Deprecated
1075  public MiniHBaseCluster startMiniCluster(int numMasters, int numRegionServers, int numDataNodes,
1076      String[] dataNodeHosts, Class<? extends HMaster> masterClass,
1077      Class<? extends MiniHBaseCluster.MiniHBaseClusterRegionServer> rsClass, boolean createRootDir,
1078      boolean createWALDir) throws Exception {
1079    StartMiniClusterOption option = StartMiniClusterOption.builder()
1080        .numMasters(numMasters).masterClass(masterClass)
1081        .numRegionServers(numRegionServers).rsClass(rsClass)
1082        .numDataNodes(numDataNodes).dataNodeHosts(dataNodeHosts)
1083        .createRootDir(createRootDir).createWALDir(createWALDir)
1084        .build();
1085    return startMiniCluster(option);
1086  }
1087
1088  /**
1089   * Start up a minicluster of hbase, dfs and zookeeper clusters with given slave node number.
1090   * All other options will use default values, defined in {@link StartMiniClusterOption.Builder}.
1091   * @param numSlaves slave node number, for both HBase region server and HDFS data node.
1092   * @see #startMiniCluster(StartMiniClusterOption option)
1093   * @see #shutdownMiniDFSCluster()
1094   */
1095  public MiniHBaseCluster startMiniCluster(int numSlaves) throws Exception {
1096    StartMiniClusterOption option = StartMiniClusterOption.builder()
1097        .numRegionServers(numSlaves).numDataNodes(numSlaves).build();
1098    return startMiniCluster(option);
1099  }
1100
1101  /**
1102   * Start up a minicluster of hbase, dfs and zookeeper all using default options.
1103   * Option default value can be found in {@link StartMiniClusterOption.Builder}.
1104   * @see #startMiniCluster(StartMiniClusterOption option)
1105   * @see #shutdownMiniDFSCluster()
1106   */
1107  public MiniHBaseCluster startMiniCluster() throws Exception {
1108    return startMiniCluster(StartMiniClusterOption.builder().build());
1109  }
1110
1111  /**
1112   * Start up a mini cluster of hbase, optionally dfs and zookeeper if needed.
1113   * It modifies Configuration.  It homes the cluster data directory under a random
1114   * subdirectory in a directory under System property test.build.data, to be cleaned up on exit.
1115   * @see #shutdownMiniDFSCluster()
1116   */
1117  public MiniHBaseCluster startMiniCluster(StartMiniClusterOption option) throws Exception {
1118    LOG.info("Starting up minicluster with option: {}", option);
1119
1120    // If we already put up a cluster, fail.
1121    if (miniClusterRunning) {
1122      throw new IllegalStateException("A mini-cluster is already running");
1123    }
1124    miniClusterRunning = true;
1125
1126    setupClusterTestDir();
1127    System.setProperty(TEST_DIRECTORY_KEY, this.clusterTestDir.getPath());
1128
1129    // Bring up mini dfs cluster. This spews a bunch of warnings about missing
1130    // scheme. Complaints are 'Scheme is undefined for build/test/data/dfs/name1'.
1131    if (dfsCluster == null) {
1132      LOG.info("STARTING DFS");
1133      dfsCluster = startMiniDFSCluster(option.getNumDataNodes(), option.getDataNodeHosts());
1134    } else {
1135      LOG.info("NOT STARTING DFS");
1136    }
1137
1138    // Start up a zk cluster.
1139    if (getZkCluster() == null) {
1140      startMiniZKCluster(option.getNumZkServers());
1141    }
1142
1143    // Start the MiniHBaseCluster
1144    return startMiniHBaseCluster(option);
1145  }
1146
1147  /**
1148   * Starts up mini hbase cluster. Usually you won't want this. You'll usually want
1149   * {@link #startMiniCluster()}. This is useful when doing stepped startup of clusters.
1150   * @return Reference to the hbase mini hbase cluster.
1151   * @see #startMiniCluster(StartMiniClusterOption)
1152   * @see #shutdownMiniHBaseCluster()
1153   */
1154  public MiniHBaseCluster startMiniHBaseCluster(StartMiniClusterOption option)
1155    throws IOException, InterruptedException {
1156    // Now do the mini hbase cluster. Set the hbase.rootdir in config.
1157    createRootDir(option.isCreateRootDir());
1158    if (option.isCreateWALDir()) {
1159      createWALRootDir();
1160    }
1161    // Set the hbase.fs.tmp.dir config to make sure that we have some default value. This is
1162    // for tests that do not read hbase-defaults.xml
1163    setHBaseFsTmpDir();
1164
1165    // These settings will make the server waits until this exact number of
1166    // regions servers are connected.
1167    if (conf.getInt(ServerManager.WAIT_ON_REGIONSERVERS_MINTOSTART, -1) == -1) {
1168      conf.setInt(ServerManager.WAIT_ON_REGIONSERVERS_MINTOSTART, option.getNumRegionServers());
1169    }
1170    if (conf.getInt(ServerManager.WAIT_ON_REGIONSERVERS_MAXTOSTART, -1) == -1) {
1171      conf.setInt(ServerManager.WAIT_ON_REGIONSERVERS_MAXTOSTART, option.getNumRegionServers());
1172    }
1173
1174    // Avoid log flooded with chore execution time, see HBASE-24646 for more details.
1175    Log4jUtils.setLogLevel(org.apache.hadoop.hbase.ScheduledChore.class.getName(), "INFO");
1176
1177    Configuration c = new Configuration(this.conf);
1178    TraceUtil.initTracer(c);
1179    this.hbaseCluster = new MiniHBaseCluster(c, option.getNumMasters(),
1180      option.getNumAlwaysStandByMasters(), option.getNumRegionServers(), option.getRsPorts(),
1181      option.getMasterClass(), option.getRsClass());
1182    // Populate the master address configuration from mini cluster configuration.
1183    conf.set(HConstants.MASTER_ADDRS_KEY, MasterRegistry.getMasterAddr(c));
1184    // Don't leave here till we've done a successful scan of the hbase:meta
1185    try (Table t = getConnection().getTable(TableName.META_TABLE_NAME);
1186      ResultScanner s = t.getScanner(new Scan())) {
1187      for (;;) {
1188        if (s.next() == null) {
1189          break;
1190        }
1191      }
1192    }
1193
1194
1195    getAdmin(); // create immediately the hbaseAdmin
1196    LOG.info("Minicluster is up; activeMaster={}", getHBaseCluster().getMaster());
1197
1198    return (MiniHBaseCluster) hbaseCluster;
1199  }
1200
1201  /**
1202   * Starts up mini hbase cluster using default options.
1203   * Default options can be found in {@link StartMiniClusterOption.Builder}.
1204   * @see #startMiniHBaseCluster(StartMiniClusterOption)
1205   * @see #shutdownMiniHBaseCluster()
1206   */
1207  public MiniHBaseCluster startMiniHBaseCluster() throws IOException, InterruptedException {
1208    return startMiniHBaseCluster(StartMiniClusterOption.builder().build());
1209  }
1210
1211  /**
1212   * Starts up mini hbase cluster.
1213   * Usually you won't want this.  You'll usually want {@link #startMiniCluster()}.
1214   * All other options will use default values, defined in {@link StartMiniClusterOption.Builder}.
1215   * @param numMasters Master node number.
1216   * @param numRegionServers Number of region servers.
1217   * @return The mini HBase cluster created.
1218   * @see #shutdownMiniHBaseCluster()
1219   * @deprecated since 2.2.0 and will be removed in 4.0.0. Use
1220   *   {@link #startMiniHBaseCluster(StartMiniClusterOption)} instead.
1221   * @see #startMiniHBaseCluster(StartMiniClusterOption)
1222   * @see <a href="https://issues.apache.org/jira/browse/HBASE-21071">HBASE-21071</a>
1223   */
1224  @Deprecated
1225  public MiniHBaseCluster startMiniHBaseCluster(int numMasters, int numRegionServers)
1226      throws IOException, InterruptedException {
1227    StartMiniClusterOption option = StartMiniClusterOption.builder()
1228        .numMasters(numMasters).numRegionServers(numRegionServers).build();
1229    return startMiniHBaseCluster(option);
1230  }
1231
1232  /**
1233   * Starts up mini hbase cluster.
1234   * Usually you won't want this.  You'll usually want {@link #startMiniCluster()}.
1235   * All other options will use default values, defined in {@link StartMiniClusterOption.Builder}.
1236   * @param numMasters Master node number.
1237   * @param numRegionServers Number of region servers.
1238   * @param rsPorts Ports that RegionServer should use.
1239   * @return The mini HBase cluster created.
1240   * @see #shutdownMiniHBaseCluster()
1241   * @deprecated since 2.2.0 and will be removed in 4.0.0. Use
1242   *   {@link #startMiniHBaseCluster(StartMiniClusterOption)} instead.
1243   * @see #startMiniHBaseCluster(StartMiniClusterOption)
1244   * @see <a href="https://issues.apache.org/jira/browse/HBASE-21071">HBASE-21071</a>
1245   */
1246  @Deprecated
1247  public MiniHBaseCluster startMiniHBaseCluster(int numMasters, int numRegionServers,
1248      List<Integer> rsPorts) throws IOException, InterruptedException {
1249    StartMiniClusterOption option = StartMiniClusterOption.builder()
1250        .numMasters(numMasters).numRegionServers(numRegionServers).rsPorts(rsPorts).build();
1251    return startMiniHBaseCluster(option);
1252  }
1253
1254  /**
1255   * Starts up mini hbase cluster.
1256   * Usually you won't want this.  You'll usually want {@link #startMiniCluster()}.
1257   * All other options will use default values, defined in {@link StartMiniClusterOption.Builder}.
1258   * @param numMasters Master node number.
1259   * @param numRegionServers Number of region servers.
1260   * @param rsPorts Ports that RegionServer should use.
1261   * @param masterClass The class to use as HMaster, or null for default.
1262   * @param rsClass The class to use as HRegionServer, or null for default.
1263   * @param createRootDir Whether to create a new root or data directory path.
1264   * @param createWALDir Whether to create a new WAL directory.
1265   * @return The mini HBase cluster created.
1266   * @see #shutdownMiniHBaseCluster()
1267   * @deprecated since 2.2.0 and will be removed in 4.0.0. Use
1268   *   {@link #startMiniHBaseCluster(StartMiniClusterOption)} instead.
1269   * @see #startMiniHBaseCluster(StartMiniClusterOption)
1270   * @see <a href="https://issues.apache.org/jira/browse/HBASE-21071">HBASE-21071</a>
1271   */
1272  @Deprecated
1273  public MiniHBaseCluster startMiniHBaseCluster(int numMasters, int numRegionServers,
1274      List<Integer> rsPorts, Class<? extends HMaster> masterClass,
1275      Class<? extends MiniHBaseCluster.MiniHBaseClusterRegionServer> rsClass,
1276      boolean createRootDir, boolean createWALDir) throws IOException, InterruptedException {
1277    StartMiniClusterOption option = StartMiniClusterOption.builder()
1278        .numMasters(numMasters).masterClass(masterClass)
1279        .numRegionServers(numRegionServers).rsClass(rsClass).rsPorts(rsPorts)
1280        .createRootDir(createRootDir).createWALDir(createWALDir).build();
1281    return startMiniHBaseCluster(option);
1282  }
1283
1284  /**
1285   * Starts the hbase cluster up again after shutting it down previously in a
1286   * test.  Use this if you want to keep dfs/zk up and just stop/start hbase.
1287   * @param servers number of region servers
1288   */
1289  public void restartHBaseCluster(int servers) throws IOException, InterruptedException {
1290    this.restartHBaseCluster(servers, null);
1291  }
1292
1293  public void restartHBaseCluster(int servers, List<Integer> ports)
1294      throws IOException, InterruptedException {
1295    StartMiniClusterOption option =
1296        StartMiniClusterOption.builder().numRegionServers(servers).rsPorts(ports).build();
1297    restartHBaseCluster(option);
1298    invalidateConnection();
1299  }
1300
1301  public void restartHBaseCluster(StartMiniClusterOption option)
1302      throws IOException, InterruptedException {
1303    if (hbaseAdmin != null) {
1304      hbaseAdmin.close();
1305      hbaseAdmin = null;
1306    }
1307    if (this.connection != null) {
1308      this.connection.close();
1309      this.connection = null;
1310    }
1311    this.hbaseCluster =
1312        new MiniHBaseCluster(this.conf, option.getNumMasters(), option.getNumAlwaysStandByMasters(),
1313            option.getNumRegionServers(), option.getRsPorts(), option.getMasterClass(),
1314            option.getRsClass());
1315    // Don't leave here till we've done a successful scan of the hbase:meta
1316    Connection conn = ConnectionFactory.createConnection(this.conf);
1317    Table t = conn.getTable(TableName.META_TABLE_NAME);
1318    ResultScanner s = t.getScanner(new Scan());
1319    while (s.next() != null) {
1320      // do nothing
1321    }
1322    LOG.info("HBase has been restarted");
1323    s.close();
1324    t.close();
1325    conn.close();
1326  }
1327
1328  /**
1329   * @return Current mini hbase cluster. Only has something in it after a call
1330   * to {@link #startMiniCluster()}.
1331   * @see #startMiniCluster()
1332   */
1333  public MiniHBaseCluster getMiniHBaseCluster() {
1334    if (this.hbaseCluster == null || this.hbaseCluster instanceof MiniHBaseCluster) {
1335      return (MiniHBaseCluster)this.hbaseCluster;
1336    }
1337    throw new RuntimeException(hbaseCluster + " not an instance of " +
1338                               MiniHBaseCluster.class.getName());
1339  }
1340
1341  /**
1342   * Stops mini hbase, zk, and hdfs clusters.
1343   * @see #startMiniCluster(int)
1344   */
1345  public void shutdownMiniCluster() throws IOException {
1346    LOG.info("Shutting down minicluster");
1347    shutdownMiniHBaseCluster();
1348    shutdownMiniDFSCluster();
1349    shutdownMiniZKCluster();
1350
1351    cleanupTestDir();
1352    miniClusterRunning = false;
1353    LOG.info("Minicluster is down");
1354  }
1355
1356  /**
1357   * Shutdown HBase mini cluster.Does not shutdown zk or dfs if running.
1358   * @throws java.io.IOException in case command is unsuccessful
1359   */
1360  public void shutdownMiniHBaseCluster() throws IOException {
1361    cleanup();
1362    if (this.hbaseCluster != null) {
1363      this.hbaseCluster.shutdown();
1364      // Wait till hbase is down before going on to shutdown zk.
1365      this.hbaseCluster.waitUntilShutDown();
1366      this.hbaseCluster = null;
1367    }
1368    if (zooKeeperWatcher != null) {
1369      zooKeeperWatcher.close();
1370      zooKeeperWatcher = null;
1371    }
1372  }
1373
1374  /**
1375   * Abruptly Shutdown HBase mini cluster. Does not shutdown zk or dfs if running.
1376   * @throws java.io.IOException throws in case command is unsuccessful
1377   */
1378  public void killMiniHBaseCluster() throws IOException {
1379    cleanup();
1380    if (this.hbaseCluster != null) {
1381      getMiniHBaseCluster().killAll();
1382      this.hbaseCluster = null;
1383    }
1384    if (zooKeeperWatcher != null) {
1385      zooKeeperWatcher.close();
1386      zooKeeperWatcher = null;
1387    }
1388  }
1389
1390  // close hbase admin, close current connection and reset MIN MAX configs for RS.
1391  private void cleanup() throws IOException {
1392    if (hbaseAdmin != null) {
1393      hbaseAdmin.close();
1394      hbaseAdmin = null;
1395    }
1396    if (this.connection != null) {
1397      this.connection.close();
1398      this.connection = null;
1399    }
1400    // unset the configuration for MIN and MAX RS to start
1401    conf.setInt(ServerManager.WAIT_ON_REGIONSERVERS_MINTOSTART, -1);
1402    conf.setInt(ServerManager.WAIT_ON_REGIONSERVERS_MAXTOSTART, -1);
1403  }
1404
1405  /**
1406   * Returns the path to the default root dir the minicluster uses. If <code>create</code>
1407   * is true, a new root directory path is fetched irrespective of whether it has been fetched
1408   * before or not. If false, previous path is used.
1409   * Note: this does not cause the root dir to be created.
1410   * @return Fully qualified path for the default hbase root dir
1411   * @throws IOException
1412   */
1413  public Path getDefaultRootDirPath(boolean create) throws IOException {
1414    if (!create) {
1415      return getDataTestDirOnTestFS();
1416    } else {
1417      return getNewDataTestDirOnTestFS();
1418    }
1419  }
1420
1421  /**
1422   * Same as {{@link HBaseTestingUtility#getDefaultRootDirPath(boolean create)}
1423   * except that <code>create</code> flag is false.
1424   * Note: this does not cause the root dir to be created.
1425   * @return Fully qualified path for the default hbase root dir
1426   * @throws IOException
1427   */
1428  public Path getDefaultRootDirPath() throws IOException {
1429    return getDefaultRootDirPath(false);
1430  }
1431
1432  /**
1433   * Creates an hbase rootdir in user home directory.  Also creates hbase
1434   * version file.  Normally you won't make use of this method.  Root hbasedir
1435   * is created for you as part of mini cluster startup.  You'd only use this
1436   * method if you were doing manual operation.
1437   * @param create This flag decides whether to get a new
1438   * root or data directory path or not, if it has been fetched already.
1439   * Note : Directory will be made irrespective of whether path has been fetched or not.
1440   * If directory already exists, it will be overwritten
1441   * @return Fully qualified path to hbase root dir
1442   * @throws IOException
1443   */
1444  public Path createRootDir(boolean create) throws IOException {
1445    FileSystem fs = FileSystem.get(this.conf);
1446    Path hbaseRootdir = getDefaultRootDirPath(create);
1447    CommonFSUtils.setRootDir(this.conf, hbaseRootdir);
1448    fs.mkdirs(hbaseRootdir);
1449    FSUtils.setVersion(fs, hbaseRootdir);
1450    return hbaseRootdir;
1451  }
1452
1453  /**
1454   * Same as {@link HBaseTestingUtility#createRootDir(boolean create)}
1455   * except that <code>create</code> flag is false.
1456   * @return Fully qualified path to hbase root dir
1457   * @throws IOException
1458   */
1459  public Path createRootDir() throws IOException {
1460    return createRootDir(false);
1461  }
1462
1463  /**
1464   * Creates a hbase walDir in the user's home directory.
1465   * Normally you won't make use of this method. Root hbaseWALDir
1466   * is created for you as part of mini cluster startup. You'd only use this
1467   * method if you were doing manual operation.
1468   *
1469   * @return Fully qualified path to hbase root dir
1470   * @throws IOException
1471  */
1472  public Path createWALRootDir() throws IOException {
1473    FileSystem fs = FileSystem.get(this.conf);
1474    Path walDir = getNewDataTestDirOnTestFS();
1475    CommonFSUtils.setWALRootDir(this.conf, walDir);
1476    fs.mkdirs(walDir);
1477    return walDir;
1478  }
1479
1480  private void setHBaseFsTmpDir() throws IOException {
1481    String hbaseFsTmpDirInString = this.conf.get("hbase.fs.tmp.dir");
1482    if (hbaseFsTmpDirInString == null) {
1483      this.conf.set("hbase.fs.tmp.dir",  getDataTestDirOnTestFS("hbase-staging").toString());
1484      LOG.info("Setting hbase.fs.tmp.dir to " + this.conf.get("hbase.fs.tmp.dir"));
1485    } else {
1486      LOG.info("The hbase.fs.tmp.dir is set to " + hbaseFsTmpDirInString);
1487    }
1488  }
1489
1490  /**
1491   * Flushes all caches in the mini hbase cluster
1492   * @throws IOException
1493   */
1494  public void flush() throws IOException {
1495    getMiniHBaseCluster().flushcache();
1496  }
1497
1498  /**
1499   * Flushes all caches in the mini hbase cluster
1500   * @throws IOException
1501   */
1502  public void flush(TableName tableName) throws IOException {
1503    getMiniHBaseCluster().flushcache(tableName);
1504  }
1505
1506  /**
1507   * Compact all regions in the mini hbase cluster
1508   * @throws IOException
1509   */
1510  public void compact(boolean major) throws IOException {
1511    getMiniHBaseCluster().compact(major);
1512  }
1513
1514  /**
1515   * Compact all of a table's reagion in the mini hbase cluster
1516   * @throws IOException
1517   */
1518  public void compact(TableName tableName, boolean major) throws IOException {
1519    getMiniHBaseCluster().compact(tableName, major);
1520  }
1521
1522  /**
1523   * Create a table.
1524   * @param tableName
1525   * @param family
1526   * @return A Table instance for the created table.
1527   * @throws IOException
1528   */
1529  public Table createTable(TableName tableName, String family)
1530  throws IOException{
1531    return createTable(tableName, new String[]{family});
1532  }
1533
1534  /**
1535   * Create a table.
1536   * @param tableName
1537   * @param families
1538   * @return A Table instance for the created table.
1539   * @throws IOException
1540   */
1541  public Table createTable(TableName tableName, String[] families)
1542  throws IOException {
1543    List<byte[]> fams = new ArrayList<>(families.length);
1544    for (String family : families) {
1545      fams.add(Bytes.toBytes(family));
1546    }
1547    return createTable(tableName, fams.toArray(new byte[0][]));
1548  }
1549
1550  /**
1551   * Create a table.
1552   * @param tableName
1553   * @param family
1554   * @return A Table instance for the created table.
1555   * @throws IOException
1556   */
1557  public Table createTable(TableName tableName, byte[] family)
1558  throws IOException{
1559    return createTable(tableName, new byte[][]{family});
1560  }
1561
1562  /**
1563   * Create a table with multiple regions.
1564   * @param tableName
1565   * @param family
1566   * @param numRegions
1567   * @return A Table instance for the created table.
1568   * @throws IOException
1569   */
1570  public Table createMultiRegionTable(TableName tableName, byte[] family, int numRegions)
1571      throws IOException {
1572    if (numRegions < 3) throw new IOException("Must create at least 3 regions");
1573    byte[] startKey = Bytes.toBytes("aaaaa");
1574    byte[] endKey = Bytes.toBytes("zzzzz");
1575    byte[][] splitKeys = Bytes.split(startKey, endKey, numRegions - 3);
1576
1577    return createTable(tableName, new byte[][] { family }, splitKeys);
1578  }
1579
1580  /**
1581   * Create a table.
1582   * @param tableName
1583   * @param families
1584   * @return A Table instance for the created table.
1585   * @throws IOException
1586   */
1587  public Table createTable(TableName tableName, byte[][] families)
1588  throws IOException {
1589    return createTable(tableName, families, (byte[][]) null);
1590  }
1591
1592  /**
1593   * Create a table with multiple regions.
1594   * @param tableName
1595   * @param families
1596   * @return A Table instance for the created table.
1597   * @throws IOException
1598   */
1599  public Table createMultiRegionTable(TableName tableName, byte[][] families) throws IOException {
1600    return createTable(tableName, families, KEYS_FOR_HBA_CREATE_TABLE);
1601  }
1602
1603  /**
1604   * Create a table.
1605   * @param tableName
1606   * @param families
1607   * @param splitKeys
1608   * @return A Table instance for the created table.
1609   * @throws IOException
1610   */
1611  public Table createTable(TableName tableName, byte[][] families, byte[][] splitKeys)
1612      throws IOException {
1613    return createTable(tableName, families, splitKeys, 1, new Configuration(getConfiguration()));
1614  }
1615
1616  /**
1617   * Create a table.
1618   * @param tableName the table name
1619   * @param families the families
1620   * @param splitKeys the splitkeys
1621   * @param replicaCount the region replica count
1622   * @return A Table instance for the created table.
1623   * @throws IOException throws IOException
1624   */
1625  public Table createTable(TableName tableName, byte[][] families, byte[][] splitKeys,
1626      int replicaCount) throws IOException {
1627    return createTable(tableName, families, splitKeys, replicaCount,
1628      new Configuration(getConfiguration()));
1629  }
1630
1631  public Table createTable(TableName tableName, byte[][] families,
1632      int numVersions, byte[] startKey, byte[] endKey, int numRegions)
1633  throws IOException{
1634    HTableDescriptor desc = createTableDescriptor(tableName, families, numVersions);
1635
1636    getAdmin().createTable(desc, startKey, endKey, numRegions);
1637    // HBaseAdmin only waits for regions to appear in hbase:meta we
1638    // should wait until they are assigned
1639    waitUntilAllRegionsAssigned(tableName);
1640    return getConnection().getTable(tableName);
1641  }
1642
1643  /**
1644   * Create a table.
1645   * @param htd
1646   * @param families
1647   * @param c Configuration to use
1648   * @return A Table instance for the created table.
1649   * @throws IOException
1650   */
1651  public Table createTable(TableDescriptor htd, byte[][] families, Configuration c)
1652  throws IOException {
1653    return createTable(htd, families, null, c);
1654  }
1655
1656  /**
1657   * Create a table.
1658   * @param htd table descriptor
1659   * @param families array of column families
1660   * @param splitKeys array of split keys
1661   * @param c Configuration to use
1662   * @return A Table instance for the created table.
1663   * @throws IOException if getAdmin or createTable fails
1664   */
1665  public Table createTable(TableDescriptor htd, byte[][] families, byte[][] splitKeys,
1666      Configuration c) throws IOException {
1667    // Disable blooms (they are on by default as of 0.95) but we disable them here because
1668    // tests have hard coded counts of what to expect in block cache, etc., and blooms being
1669    // on is interfering.
1670    return createTable(htd, families, splitKeys, BloomType.NONE, HConstants.DEFAULT_BLOCKSIZE, c);
1671  }
1672
1673  /**
1674   * Create a table.
1675   * @param htd table descriptor
1676   * @param families array of column families
1677   * @param splitKeys array of split keys
1678   * @param type Bloom type
1679   * @param blockSize block size
1680   * @param c Configuration to use
1681   * @return A Table instance for the created table.
1682   * @throws IOException if getAdmin or createTable fails
1683   */
1684
1685  public Table createTable(TableDescriptor htd, byte[][] families, byte[][] splitKeys,
1686      BloomType type, int blockSize, Configuration c) throws IOException {
1687    TableDescriptorBuilder builder = TableDescriptorBuilder.newBuilder(htd);
1688    for (byte[] family : families) {
1689      ColumnFamilyDescriptorBuilder cfdb = ColumnFamilyDescriptorBuilder.newBuilder(family)
1690        .setBloomFilterType(type)
1691        .setBlocksize(blockSize);
1692      if (isNewVersionBehaviorEnabled()) {
1693          cfdb.setNewVersionBehavior(true);
1694      }
1695      builder.setColumnFamily(cfdb.build());
1696    }
1697    TableDescriptor td = builder.build();
1698    getAdmin().createTable(td, splitKeys);
1699    // HBaseAdmin only waits for regions to appear in hbase:meta
1700    // we should wait until they are assigned
1701    waitUntilAllRegionsAssigned(td.getTableName());
1702    return getConnection().getTable(td.getTableName());
1703  }
1704
1705  /**
1706   * Create a table.
1707   * @param htd table descriptor
1708   * @param splitRows array of split keys
1709   * @return A Table instance for the created table.
1710   * @throws IOException
1711   */
1712  public Table createTable(TableDescriptor htd, byte[][] splitRows)
1713      throws IOException {
1714    TableDescriptorBuilder builder = TableDescriptorBuilder.newBuilder(htd);
1715    if (isNewVersionBehaviorEnabled()) {
1716      for (ColumnFamilyDescriptor family : htd.getColumnFamilies()) {
1717         builder.setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(family)
1718           .setNewVersionBehavior(true).build());
1719      }
1720    }
1721    getAdmin().createTable(builder.build(), splitRows);
1722    // HBaseAdmin only waits for regions to appear in hbase:meta
1723    // we should wait until they are assigned
1724    waitUntilAllRegionsAssigned(htd.getTableName());
1725    return getConnection().getTable(htd.getTableName());
1726  }
1727
1728  /**
1729   * Create a table.
1730   * @param tableName the table name
1731   * @param families the families
1732   * @param splitKeys the split keys
1733   * @param replicaCount the replica count
1734   * @param c Configuration to use
1735   * @return A Table instance for the created table.
1736   * @throws IOException
1737   */
1738  public Table createTable(TableName tableName, byte[][] families, byte[][] splitKeys,
1739      int replicaCount, final Configuration c) throws IOException {
1740    HTableDescriptor htd = new HTableDescriptor(tableName);
1741    htd.setRegionReplication(replicaCount);
1742    return createTable(htd, families, splitKeys, c);
1743  }
1744
1745  /**
1746   * Create a table.
1747   * @param tableName
1748   * @param family
1749   * @param numVersions
1750   * @return A Table instance for the created table.
1751   * @throws IOException
1752   */
1753  public Table createTable(TableName tableName, byte[] family, int numVersions)
1754  throws IOException {
1755    return createTable(tableName, new byte[][]{family}, numVersions);
1756  }
1757
1758  /**
1759   * Create a table.
1760   * @param tableName
1761   * @param families
1762   * @param numVersions
1763   * @return A Table instance for the created table.
1764   * @throws IOException
1765   */
1766  public Table createTable(TableName tableName, byte[][] families, int numVersions)
1767      throws IOException {
1768    return createTable(tableName, families, numVersions, (byte[][]) null);
1769  }
1770
1771  /**
1772   * Create a table.
1773   * @param tableName
1774   * @param families
1775   * @param numVersions
1776   * @param splitKeys
1777   * @return A Table instance for the created table.
1778   * @throws IOException
1779   */
1780  public Table createTable(TableName tableName, byte[][] families, int numVersions,
1781      byte[][] splitKeys) throws IOException {
1782    HTableDescriptor desc = new HTableDescriptor(tableName);
1783    for (byte[] family : families) {
1784      HColumnDescriptor hcd = new HColumnDescriptor(family).setMaxVersions(numVersions);
1785      if (isNewVersionBehaviorEnabled()) {
1786        hcd.setNewVersionBehavior(true);
1787      }
1788      desc.addFamily(hcd);
1789    }
1790    getAdmin().createTable(desc, splitKeys);
1791    // HBaseAdmin only waits for regions to appear in hbase:meta we should wait until they are
1792    // assigned
1793    waitUntilAllRegionsAssigned(tableName);
1794    return getConnection().getTable(tableName);
1795  }
1796
1797  /**
1798   * Create a table with multiple regions.
1799   * @param tableName
1800   * @param families
1801   * @param numVersions
1802   * @return A Table instance for the created table.
1803   * @throws IOException
1804   */
1805  public Table createMultiRegionTable(TableName tableName, byte[][] families, int numVersions)
1806      throws IOException {
1807    return createTable(tableName, families, numVersions, KEYS_FOR_HBA_CREATE_TABLE);
1808  }
1809
1810  /**
1811   * Create a table.
1812   * @param tableName
1813   * @param families
1814   * @param numVersions
1815   * @param blockSize
1816   * @return A Table instance for the created table.
1817   * @throws IOException
1818   */
1819  public Table createTable(TableName tableName, byte[][] families,
1820    int numVersions, int blockSize) throws IOException {
1821    HTableDescriptor desc = new HTableDescriptor(tableName);
1822    for (byte[] family : families) {
1823      HColumnDescriptor hcd = new HColumnDescriptor(family)
1824          .setMaxVersions(numVersions)
1825          .setBlocksize(blockSize);
1826      if (isNewVersionBehaviorEnabled()) {
1827        hcd.setNewVersionBehavior(true);
1828      }
1829      desc.addFamily(hcd);
1830    }
1831    getAdmin().createTable(desc);
1832    // HBaseAdmin only waits for regions to appear in hbase:meta we should wait until they are
1833    // assigned
1834    waitUntilAllRegionsAssigned(tableName);
1835    return getConnection().getTable(tableName);
1836  }
1837
1838  public Table createTable(TableName tableName, byte[][] families,
1839      int numVersions, int blockSize, String cpName) throws IOException {
1840      HTableDescriptor desc = new HTableDescriptor(tableName);
1841      for (byte[] family : families) {
1842        HColumnDescriptor hcd = new HColumnDescriptor(family)
1843            .setMaxVersions(numVersions)
1844            .setBlocksize(blockSize);
1845        if (isNewVersionBehaviorEnabled()) {
1846          hcd.setNewVersionBehavior(true);
1847        }
1848        desc.addFamily(hcd);
1849      }
1850      if(cpName != null) {
1851        desc.addCoprocessor(cpName);
1852      }
1853      getAdmin().createTable(desc);
1854      // HBaseAdmin only waits for regions to appear in hbase:meta we should wait until they are
1855      // assigned
1856      waitUntilAllRegionsAssigned(tableName);
1857      return getConnection().getTable(tableName);
1858    }
1859
1860  /**
1861   * Create a table.
1862   * @param tableName
1863   * @param families
1864   * @param numVersions
1865   * @return A Table instance for the created table.
1866   * @throws IOException
1867   */
1868  public Table createTable(TableName tableName, byte[][] families,
1869      int[] numVersions)
1870  throws IOException {
1871    HTableDescriptor desc = new HTableDescriptor(tableName);
1872    int i = 0;
1873    for (byte[] family : families) {
1874      HColumnDescriptor hcd = new HColumnDescriptor(family)
1875          .setMaxVersions(numVersions[i]);
1876      if (isNewVersionBehaviorEnabled()) {
1877        hcd.setNewVersionBehavior(true);
1878      }
1879      desc.addFamily(hcd);
1880      i++;
1881    }
1882    getAdmin().createTable(desc);
1883    // HBaseAdmin only waits for regions to appear in hbase:meta we should wait until they are
1884    // assigned
1885    waitUntilAllRegionsAssigned(tableName);
1886    return getConnection().getTable(tableName);
1887  }
1888
1889  /**
1890   * Create a table.
1891   * @param tableName
1892   * @param family
1893   * @param splitRows
1894   * @return A Table instance for the created table.
1895   * @throws IOException
1896   */
1897  public Table createTable(TableName tableName, byte[] family, byte[][] splitRows)
1898      throws IOException {
1899    HTableDescriptor desc = new HTableDescriptor(tableName);
1900    HColumnDescriptor hcd = new HColumnDescriptor(family);
1901    if (isNewVersionBehaviorEnabled()) {
1902      hcd.setNewVersionBehavior(true);
1903    }
1904    desc.addFamily(hcd);
1905    getAdmin().createTable(desc, splitRows);
1906    // HBaseAdmin only waits for regions to appear in hbase:meta we should wait until they are
1907    // assigned
1908    waitUntilAllRegionsAssigned(tableName);
1909    return getConnection().getTable(tableName);
1910  }
1911
1912  /**
1913   * Create a table with multiple regions.
1914   * @param tableName
1915   * @param family
1916   * @return A Table instance for the created table.
1917   * @throws IOException
1918   */
1919  public Table createMultiRegionTable(TableName tableName, byte[] family) throws IOException {
1920    return createTable(tableName, family, KEYS_FOR_HBA_CREATE_TABLE);
1921  }
1922
1923  /**
1924   * Modify a table, synchronous. Waiting logic similar to that of {@code admin.rb#alter_status}.
1925   */
1926  @SuppressWarnings("serial")
1927  public static void modifyTableSync(Admin admin, TableDescriptor desc)
1928      throws IOException, InterruptedException {
1929    admin.modifyTable(desc);
1930    Pair<Integer, Integer> status = new Pair<Integer, Integer>() {{
1931      setFirst(0);
1932      setSecond(0);
1933    }};
1934    int i = 0;
1935    do {
1936      status = admin.getAlterStatus(desc.getTableName());
1937      if (status.getSecond() != 0) {
1938        LOG.debug(status.getSecond() - status.getFirst() + "/" + status.getSecond()
1939          + " regions updated.");
1940        Thread.sleep(1 * 1000L);
1941      } else {
1942        LOG.debug("All regions updated.");
1943        break;
1944      }
1945    } while (status.getFirst() != 0 && i++ < 500);
1946    if (status.getFirst() != 0) {
1947      throw new IOException("Failed to update all regions even after 500 seconds.");
1948    }
1949  }
1950
1951  /**
1952   * Set the number of Region replicas.
1953   */
1954  public static void setReplicas(Admin admin, TableName table, int replicaCount)
1955      throws IOException, InterruptedException {
1956    admin.disableTable(table);
1957    HTableDescriptor desc = new HTableDescriptor(admin.getTableDescriptor(table));
1958    desc.setRegionReplication(replicaCount);
1959    admin.modifyTable(desc.getTableName(), desc);
1960    admin.enableTable(table);
1961  }
1962
1963  /**
1964   * Drop an existing table
1965   * @param tableName existing table
1966   */
1967  public void deleteTable(TableName tableName) throws IOException {
1968    try {
1969      getAdmin().disableTable(tableName);
1970    } catch (TableNotEnabledException e) {
1971      LOG.debug("Table: " + tableName + " already disabled, so just deleting it.");
1972    }
1973    getAdmin().deleteTable(tableName);
1974  }
1975
1976  /**
1977   * Drop an existing table
1978   * @param tableName existing table
1979   */
1980  public void deleteTableIfAny(TableName tableName) throws IOException {
1981    try {
1982      deleteTable(tableName);
1983    } catch (TableNotFoundException e) {
1984      // ignore
1985    }
1986  }
1987
1988  // ==========================================================================
1989  // Canned table and table descriptor creation
1990  // TODO replace HBaseTestCase
1991
1992  public final static byte [] fam1 = Bytes.toBytes("colfamily11");
1993  public final static byte [] fam2 = Bytes.toBytes("colfamily21");
1994  public final static byte [] fam3 = Bytes.toBytes("colfamily31");
1995  public static final byte[][] COLUMNS = {fam1, fam2, fam3};
1996  private static final int MAXVERSIONS = 3;
1997
1998  public static final char FIRST_CHAR = 'a';
1999  public static final char LAST_CHAR = 'z';
2000  public static final byte [] START_KEY_BYTES = {FIRST_CHAR, FIRST_CHAR, FIRST_CHAR};
2001  public static final String START_KEY = new String(START_KEY_BYTES, HConstants.UTF8_CHARSET);
2002
2003  /**
2004   * @deprecated since 2.0.0 and will be removed in 3.0.0. Use
2005   *   {@link #createTableDescriptor(TableName, int, int, int, KeepDeletedCells)} instead.
2006   * @see #createTableDescriptor(TableName, int, int, int, KeepDeletedCells)
2007   * @see <a href="https://issues.apache.org/jira/browse/HBASE-13893">HBASE-13893</a>
2008   */
2009  @Deprecated
2010  public HTableDescriptor createTableDescriptor(final String name,
2011      final int minVersions, final int versions, final int ttl, KeepDeletedCells keepDeleted) {
2012    return this.createTableDescriptor(TableName.valueOf(name), minVersions, versions, ttl,
2013        keepDeleted);
2014  }
2015
2016  /**
2017   * Create a table of name <code>name</code>.
2018   * @param name Name to give table.
2019   * @return Column descriptor.
2020   * @deprecated since 2.0.0 and will be removed in 3.0.0. Use
2021   *   {@link #createTableDescriptor(TableName, int, int, int, KeepDeletedCells)} instead.
2022   * @see #createTableDescriptor(TableName, int, int, int, KeepDeletedCells)
2023   * @see <a href="https://issues.apache.org/jira/browse/HBASE-13893">HBASE-13893</a>
2024   */
2025  @Deprecated
2026  public HTableDescriptor createTableDescriptor(final String name) {
2027    return createTableDescriptor(TableName.valueOf(name),  HColumnDescriptor.DEFAULT_MIN_VERSIONS,
2028        MAXVERSIONS, HConstants.FOREVER, HColumnDescriptor.DEFAULT_KEEP_DELETED);
2029  }
2030
2031  public HTableDescriptor createTableDescriptor(final TableName name,
2032      final int minVersions, final int versions, final int ttl, KeepDeletedCells keepDeleted) {
2033    HTableDescriptor htd = new HTableDescriptor(name);
2034    for (byte[] cfName : new byte[][]{ fam1, fam2, fam3 }) {
2035      HColumnDescriptor hcd = new HColumnDescriptor(cfName)
2036          .setMinVersions(minVersions)
2037          .setMaxVersions(versions)
2038          .setKeepDeletedCells(keepDeleted)
2039          .setBlockCacheEnabled(false)
2040          .setTimeToLive(ttl);
2041      if (isNewVersionBehaviorEnabled()) {
2042          hcd.setNewVersionBehavior(true);
2043      }
2044      htd.addFamily(hcd);
2045    }
2046    return htd;
2047  }
2048
2049  /**
2050   * Create a table of name <code>name</code>.
2051   * @param name Name to give table.
2052   * @return Column descriptor.
2053   */
2054  public HTableDescriptor createTableDescriptor(final TableName name) {
2055    return createTableDescriptor(name,  HColumnDescriptor.DEFAULT_MIN_VERSIONS,
2056        MAXVERSIONS, HConstants.FOREVER, HColumnDescriptor.DEFAULT_KEEP_DELETED);
2057  }
2058
2059  public HTableDescriptor createTableDescriptor(final TableName tableName,
2060      byte[] family) {
2061    return createTableDescriptor(tableName, new byte[][] {family}, 1);
2062  }
2063
2064  public HTableDescriptor createTableDescriptor(final TableName tableName,
2065      byte[][] families, int maxVersions) {
2066    HTableDescriptor desc = new HTableDescriptor(tableName);
2067    for (byte[] family : families) {
2068      HColumnDescriptor hcd = new HColumnDescriptor(family)
2069          .setMaxVersions(maxVersions);
2070      if (isNewVersionBehaviorEnabled()) {
2071          hcd.setNewVersionBehavior(true);
2072      }
2073      desc.addFamily(hcd);
2074    }
2075    return desc;
2076  }
2077
2078  /**
2079   * Create an HRegion that writes to the local tmp dirs
2080   * @param desc a table descriptor indicating which table the region belongs to
2081   * @param startKey the start boundary of the region
2082   * @param endKey the end boundary of the region
2083   * @return a region that writes to local dir for testing
2084   * @throws IOException
2085   */
2086  public HRegion createLocalHRegion(TableDescriptor desc, byte [] startKey,
2087      byte [] endKey)
2088  throws IOException {
2089    HRegionInfo hri = new HRegionInfo(desc.getTableName(), startKey, endKey);
2090    return createLocalHRegion(hri, desc);
2091  }
2092
2093  /**
2094   * Create an HRegion that writes to the local tmp dirs. Creates the WAL for you. Be sure to call
2095   * {@link HBaseTestingUtility#closeRegionAndWAL(HRegion)} when you're finished with it.
2096   */
2097  public HRegion createLocalHRegion(RegionInfo info, TableDescriptor desc) throws IOException {
2098    return createRegionAndWAL(info, getDataTestDir(), getConfiguration(), desc);
2099  }
2100
2101  /**
2102   * Create an HRegion that writes to the local tmp dirs with specified wal
2103   * @param info regioninfo
2104   * @param desc table descriptor
2105   * @param wal wal for this region.
2106   * @return created hregion
2107   * @throws IOException
2108   */
2109  public HRegion createLocalHRegion(RegionInfo info, TableDescriptor desc, WAL wal)
2110      throws IOException {
2111    return HRegion.createHRegion(info, getDataTestDir(), getConfiguration(), desc, wal);
2112  }
2113
2114  /**
2115   * @param tableName the name of the table
2116   * @param startKey the start key of the region
2117   * @param stopKey the stop key of the region
2118   * @param callingMethod the name of the calling method probably a test method
2119   * @param conf the configuration to use
2120   * @param isReadOnly {@code true} if the table is read only, {@code false} otherwise
2121   * @param families the column families to use
2122   * @throws IOException if an IO problem is encountered
2123   * @return A region on which you must call {@link HBaseTestingUtility#closeRegionAndWAL(HRegion)}
2124   *         when done.
2125   * @deprecated since 2.0.0 and will be removed in 3.0.0. Use
2126   *   {@link #createLocalHRegion(TableName, byte[], byte[], boolean, Durability, WAL, byte[]...)}
2127   *   instead.
2128   * @see #createLocalHRegion(TableName, byte[], byte[], boolean, Durability, WAL, byte[]...)
2129   * @see <a href="https://issues.apache.org/jira/browse/HBASE-13893">HBASE-13893</a>
2130   */
2131  @Deprecated
2132  public HRegion createLocalHRegion(byte[] tableName, byte[] startKey, byte[] stopKey,
2133      String callingMethod, Configuration conf, boolean isReadOnly, Durability durability,
2134      WAL wal, byte[]... families) throws IOException {
2135    return this
2136        .createLocalHRegion(TableName.valueOf(tableName), startKey, stopKey, isReadOnly, durability,
2137            wal, families);
2138  }
2139
2140  /**
2141   * @param tableName
2142   * @param startKey
2143   * @param stopKey
2144   * @param isReadOnly
2145   * @param families
2146   * @return A region on which you must call
2147   * {@link HBaseTestingUtility#closeRegionAndWAL(HRegion)} when done.
2148   * @throws IOException
2149   */
2150  public HRegion createLocalHRegion(TableName tableName, byte[] startKey, byte[] stopKey,
2151      boolean isReadOnly, Durability durability, WAL wal, byte[]... families) throws IOException {
2152    return createLocalHRegionWithInMemoryFlags(tableName,startKey, stopKey, isReadOnly,
2153        durability, wal, null, families);
2154  }
2155
2156  public HRegion createLocalHRegionWithInMemoryFlags(TableName tableName, byte[] startKey,
2157      byte[] stopKey,
2158      boolean isReadOnly, Durability durability, WAL wal, boolean[] compactedMemStore,
2159      byte[]... families)
2160      throws IOException {
2161    HTableDescriptor htd = new HTableDescriptor(tableName);
2162    htd.setReadOnly(isReadOnly);
2163    int i=0;
2164    for (byte[] family : families) {
2165      HColumnDescriptor hcd = new HColumnDescriptor(family);
2166      if(compactedMemStore != null && i < compactedMemStore.length) {
2167        hcd.setInMemoryCompaction(MemoryCompactionPolicy.BASIC);
2168      } else {
2169        hcd.setInMemoryCompaction(MemoryCompactionPolicy.NONE);
2170
2171      }
2172      i++;
2173      // Set default to be three versions.
2174      hcd.setMaxVersions(Integer.MAX_VALUE);
2175      htd.addFamily(hcd);
2176    }
2177    htd.setDurability(durability);
2178    HRegionInfo info = new HRegionInfo(htd.getTableName(), startKey, stopKey, false);
2179    return createLocalHRegion(info, htd, wal);
2180  }
2181
2182  //
2183  // ==========================================================================
2184
2185  /**
2186   * Provide an existing table name to truncate.
2187   * Scans the table and issues a delete for each row read.
2188   * @param tableName existing table
2189   * @return HTable to that new table
2190   * @throws IOException
2191   */
2192  public Table deleteTableData(TableName tableName) throws IOException {
2193    Table table = getConnection().getTable(tableName);
2194    Scan scan = new Scan();
2195    ResultScanner resScan = table.getScanner(scan);
2196    for(Result res : resScan) {
2197      Delete del = new Delete(res.getRow());
2198      table.delete(del);
2199    }
2200    resScan = table.getScanner(scan);
2201    resScan.close();
2202    return table;
2203  }
2204
2205  /**
2206   * Truncate a table using the admin command.
2207   * Effectively disables, deletes, and recreates the table.
2208   * @param tableName table which must exist.
2209   * @param preserveRegions keep the existing split points
2210   * @return HTable for the new table
2211   */
2212  public Table truncateTable(final TableName tableName, final boolean preserveRegions) throws
2213      IOException {
2214    Admin admin = getAdmin();
2215    if (!admin.isTableDisabled(tableName)) {
2216      admin.disableTable(tableName);
2217    }
2218    admin.truncateTable(tableName, preserveRegions);
2219    return getConnection().getTable(tableName);
2220  }
2221
2222  /**
2223   * Truncate a table using the admin command.
2224   * Effectively disables, deletes, and recreates the table.
2225   * For previous behavior of issuing row deletes, see
2226   * deleteTableData.
2227   * Expressly does not preserve regions of existing table.
2228   * @param tableName table which must exist.
2229   * @return HTable for the new table
2230   */
2231  public Table truncateTable(final TableName tableName) throws IOException {
2232    return truncateTable(tableName, false);
2233  }
2234
2235  /**
2236   * Load table with rows from 'aaa' to 'zzz'.
2237   * @param t Table
2238   * @param f Family
2239   * @return Count of rows loaded.
2240   * @throws IOException
2241   */
2242  public int loadTable(final Table t, final byte[] f) throws IOException {
2243    return loadTable(t, new byte[][] {f});
2244  }
2245
2246  /**
2247   * Load table with rows from 'aaa' to 'zzz'.
2248   * @param t Table
2249   * @param f Family
2250   * @return Count of rows loaded.
2251   * @throws IOException
2252   */
2253  public int loadTable(final Table t, final byte[] f, boolean writeToWAL) throws IOException {
2254    return loadTable(t, new byte[][] {f}, null, writeToWAL);
2255  }
2256
2257  /**
2258   * Load table of multiple column families with rows from 'aaa' to 'zzz'.
2259   * @param t Table
2260   * @param f Array of Families to load
2261   * @return Count of rows loaded.
2262   * @throws IOException
2263   */
2264  public int loadTable(final Table t, final byte[][] f) throws IOException {
2265    return loadTable(t, f, null);
2266  }
2267
2268  /**
2269   * Load table of multiple column families with rows from 'aaa' to 'zzz'.
2270   * @param t Table
2271   * @param f Array of Families to load
2272   * @param value the values of the cells. If null is passed, the row key is used as value
2273   * @return Count of rows loaded.
2274   * @throws IOException
2275   */
2276  public int loadTable(final Table t, final byte[][] f, byte[] value) throws IOException {
2277    return loadTable(t, f, value, true);
2278  }
2279
2280  /**
2281   * Load table of multiple column families with rows from 'aaa' to 'zzz'.
2282   * @param t Table
2283   * @param f Array of Families to load
2284   * @param value the values of the cells. If null is passed, the row key is used as value
2285   * @return Count of rows loaded.
2286   * @throws IOException
2287   */
2288  public int loadTable(final Table t, final byte[][] f, byte[] value,
2289      boolean writeToWAL) throws IOException {
2290    List<Put> puts = new ArrayList<>();
2291    for (byte[] row : HBaseTestingUtility.ROWS) {
2292      Put put = new Put(row);
2293      put.setDurability(writeToWAL ? Durability.USE_DEFAULT : Durability.SKIP_WAL);
2294      for (int i = 0; i < f.length; i++) {
2295        byte[] value1 = value != null ? value : row;
2296        put.addColumn(f[i], f[i], value1);
2297      }
2298      puts.add(put);
2299    }
2300    t.put(puts);
2301    return puts.size();
2302  }
2303
2304  /** A tracker for tracking and validating table rows
2305   * generated with {@link HBaseTestingUtility#loadTable(Table, byte[])}
2306   */
2307  public static class SeenRowTracker {
2308    int dim = 'z' - 'a' + 1;
2309    int[][][] seenRows = new int[dim][dim][dim]; //count of how many times the row is seen
2310    byte[] startRow;
2311    byte[] stopRow;
2312
2313    public SeenRowTracker(byte[] startRow, byte[] stopRow) {
2314      this.startRow = startRow;
2315      this.stopRow = stopRow;
2316    }
2317
2318    void reset() {
2319      for (byte[] row : ROWS) {
2320        seenRows[i(row[0])][i(row[1])][i(row[2])] = 0;
2321      }
2322    }
2323
2324    int i(byte b) {
2325      return b - 'a';
2326    }
2327
2328    public void addRow(byte[] row) {
2329      seenRows[i(row[0])][i(row[1])][i(row[2])]++;
2330    }
2331
2332    /** Validate that all the rows between startRow and stopRow are seen exactly once, and
2333     * all other rows none
2334     */
2335    public void validate() {
2336      for (byte b1 = 'a'; b1 <= 'z'; b1++) {
2337        for (byte b2 = 'a'; b2 <= 'z'; b2++) {
2338          for (byte b3 = 'a'; b3 <= 'z'; b3++) {
2339            int count = seenRows[i(b1)][i(b2)][i(b3)];
2340            int expectedCount = 0;
2341            if (Bytes.compareTo(new byte[] {b1,b2,b3}, startRow) >= 0
2342                && Bytes.compareTo(new byte[] {b1,b2,b3}, stopRow) < 0) {
2343              expectedCount = 1;
2344            }
2345            if (count != expectedCount) {
2346              String row = new String(new byte[] {b1,b2,b3}, StandardCharsets.UTF_8);
2347              throw new RuntimeException("Row:" + row + " has a seen count of " + count + " " +
2348                  "instead of " + expectedCount);
2349            }
2350          }
2351        }
2352      }
2353    }
2354  }
2355
2356  public int loadRegion(final HRegion r, final byte[] f) throws IOException {
2357    return loadRegion(r, f, false);
2358  }
2359
2360  public int loadRegion(final Region r, final byte[] f) throws IOException {
2361    return loadRegion((HRegion)r, f);
2362  }
2363
2364  /**
2365   * Load region with rows from 'aaa' to 'zzz'.
2366   * @param r Region
2367   * @param f Family
2368   * @param flush flush the cache if true
2369   * @return Count of rows loaded.
2370   * @throws IOException
2371   */
2372  public int loadRegion(final HRegion r, final byte[] f, final boolean flush)
2373  throws IOException {
2374    byte[] k = new byte[3];
2375    int rowCount = 0;
2376    for (byte b1 = 'a'; b1 <= 'z'; b1++) {
2377      for (byte b2 = 'a'; b2 <= 'z'; b2++) {
2378        for (byte b3 = 'a'; b3 <= 'z'; b3++) {
2379          k[0] = b1;
2380          k[1] = b2;
2381          k[2] = b3;
2382          Put put = new Put(k);
2383          put.setDurability(Durability.SKIP_WAL);
2384          put.addColumn(f, null, k);
2385          if (r.getWAL() == null) {
2386            put.setDurability(Durability.SKIP_WAL);
2387          }
2388          int preRowCount = rowCount;
2389          int pause = 10;
2390          int maxPause = 1000;
2391          while (rowCount == preRowCount) {
2392            try {
2393              r.put(put);
2394              rowCount++;
2395            } catch (RegionTooBusyException e) {
2396              pause = (pause * 2 >= maxPause) ? maxPause : pause * 2;
2397              Threads.sleep(pause);
2398            }
2399          }
2400        }
2401      }
2402      if (flush) {
2403        r.flush(true);
2404      }
2405    }
2406    return rowCount;
2407  }
2408
2409  public void loadNumericRows(final Table t, final byte[] f, int startRow, int endRow)
2410      throws IOException {
2411    for (int i = startRow; i < endRow; i++) {
2412      byte[] data = Bytes.toBytes(String.valueOf(i));
2413      Put put = new Put(data);
2414      put.addColumn(f, null, data);
2415      t.put(put);
2416    }
2417  }
2418
2419  public void loadRandomRows(final Table t, final byte[] f, int rowSize, int totalRows)
2420      throws IOException {
2421    Random r = new Random();
2422    byte[] row = new byte[rowSize];
2423    for (int i = 0; i < totalRows; i++) {
2424      r.nextBytes(row);
2425      Put put = new Put(row);
2426      put.addColumn(f, new byte[]{0}, new byte[]{0});
2427      t.put(put);
2428    }
2429  }
2430
2431  public void verifyNumericRows(Table table, final byte[] f, int startRow, int endRow,
2432      int replicaId)
2433      throws IOException {
2434    for (int i = startRow; i < endRow; i++) {
2435      String failMsg = "Failed verification of row :" + i;
2436      byte[] data = Bytes.toBytes(String.valueOf(i));
2437      Get get = new Get(data);
2438      get.setReplicaId(replicaId);
2439      get.setConsistency(Consistency.TIMELINE);
2440      Result result = table.get(get);
2441      assertTrue(failMsg, result.containsColumn(f, null));
2442      assertEquals(failMsg, 1, result.getColumnCells(f, null).size());
2443      Cell cell = result.getColumnLatestCell(f, null);
2444      assertTrue(failMsg,
2445        Bytes.equals(data, 0, data.length, cell.getValueArray(), cell.getValueOffset(),
2446          cell.getValueLength()));
2447    }
2448  }
2449
2450  public void verifyNumericRows(Region region, final byte[] f, int startRow, int endRow)
2451      throws IOException {
2452    verifyNumericRows((HRegion)region, f, startRow, endRow);
2453  }
2454
2455  public void verifyNumericRows(HRegion region, final byte[] f, int startRow, int endRow)
2456      throws IOException {
2457    verifyNumericRows(region, f, startRow, endRow, true);
2458  }
2459
2460  public void verifyNumericRows(Region region, final byte[] f, int startRow, int endRow,
2461      final boolean present) throws IOException {
2462    verifyNumericRows((HRegion)region, f, startRow, endRow, present);
2463  }
2464
2465  public void verifyNumericRows(HRegion region, final byte[] f, int startRow, int endRow,
2466      final boolean present) throws IOException {
2467    for (int i = startRow; i < endRow; i++) {
2468      String failMsg = "Failed verification of row :" + i;
2469      byte[] data = Bytes.toBytes(String.valueOf(i));
2470      Result result = region.get(new Get(data));
2471
2472      boolean hasResult = result != null && !result.isEmpty();
2473      assertEquals(failMsg + result, present, hasResult);
2474      if (!present) continue;
2475
2476      assertTrue(failMsg, result.containsColumn(f, null));
2477      assertEquals(failMsg, 1, result.getColumnCells(f, null).size());
2478      Cell cell = result.getColumnLatestCell(f, null);
2479      assertTrue(failMsg,
2480        Bytes.equals(data, 0, data.length, cell.getValueArray(), cell.getValueOffset(),
2481          cell.getValueLength()));
2482    }
2483  }
2484
2485  public void deleteNumericRows(final Table t, final byte[] f, int startRow, int endRow)
2486      throws IOException {
2487    for (int i = startRow; i < endRow; i++) {
2488      byte[] data = Bytes.toBytes(String.valueOf(i));
2489      Delete delete = new Delete(data);
2490      delete.addFamily(f);
2491      t.delete(delete);
2492    }
2493  }
2494
2495  /**
2496   * Return the number of rows in the given table.
2497   * @param table to count rows
2498   * @return count of rows
2499   */
2500  public int countRows(final Table table) throws IOException {
2501    return countRows(table, new Scan());
2502  }
2503
2504  public int countRows(final Table table, final Scan scan) throws IOException {
2505    try (ResultScanner results = table.getScanner(scan)) {
2506      int count = 0;
2507      while (results.next() != null) {
2508        count++;
2509      }
2510      return count;
2511    }
2512  }
2513
2514  public int countRows(final Table table, final byte[]... families) throws IOException {
2515    Scan scan = new Scan();
2516    for (byte[] family: families) {
2517      scan.addFamily(family);
2518    }
2519    return countRows(table, scan);
2520  }
2521
2522  /**
2523   * Return the number of rows in the given table.
2524   */
2525  public int countRows(final TableName tableName) throws IOException {
2526    Table table = getConnection().getTable(tableName);
2527    try {
2528      return countRows(table);
2529    } finally {
2530      table.close();
2531    }
2532  }
2533
2534  public int countRows(final Region region) throws IOException {
2535    return countRows(region, new Scan());
2536  }
2537
2538  public int countRows(final Region region, final Scan scan) throws IOException {
2539    InternalScanner scanner = region.getScanner(scan);
2540    try {
2541      return countRows(scanner);
2542    } finally {
2543      scanner.close();
2544    }
2545  }
2546
2547  public int countRows(final InternalScanner scanner) throws IOException {
2548    int scannedCount = 0;
2549    List<Cell> results = new ArrayList<>();
2550    boolean hasMore = true;
2551    while (hasMore) {
2552      hasMore = scanner.next(results);
2553      scannedCount += results.size();
2554      results.clear();
2555    }
2556    return scannedCount;
2557  }
2558
2559  /**
2560   * Return an md5 digest of the entire contents of a table.
2561   */
2562  public String checksumRows(final Table table) throws Exception {
2563
2564    Scan scan = new Scan();
2565    ResultScanner results = table.getScanner(scan);
2566    MessageDigest digest = MessageDigest.getInstance("MD5");
2567    for (Result res : results) {
2568      digest.update(res.getRow());
2569    }
2570    results.close();
2571    return digest.toString();
2572  }
2573
2574  /** All the row values for the data loaded by {@link #loadTable(Table, byte[])} */
2575  public static final byte[][] ROWS = new byte[(int) Math.pow('z' - 'a' + 1, 3)][3]; // ~52KB
2576  static {
2577    int i = 0;
2578    for (byte b1 = 'a'; b1 <= 'z'; b1++) {
2579      for (byte b2 = 'a'; b2 <= 'z'; b2++) {
2580        for (byte b3 = 'a'; b3 <= 'z'; b3++) {
2581          ROWS[i][0] = b1;
2582          ROWS[i][1] = b2;
2583          ROWS[i][2] = b3;
2584          i++;
2585        }
2586      }
2587    }
2588  }
2589
2590  public static final byte[][] KEYS = {
2591    HConstants.EMPTY_BYTE_ARRAY, Bytes.toBytes("bbb"),
2592    Bytes.toBytes("ccc"), Bytes.toBytes("ddd"), Bytes.toBytes("eee"),
2593    Bytes.toBytes("fff"), Bytes.toBytes("ggg"), Bytes.toBytes("hhh"),
2594    Bytes.toBytes("iii"), Bytes.toBytes("jjj"), Bytes.toBytes("kkk"),
2595    Bytes.toBytes("lll"), Bytes.toBytes("mmm"), Bytes.toBytes("nnn"),
2596    Bytes.toBytes("ooo"), Bytes.toBytes("ppp"), Bytes.toBytes("qqq"),
2597    Bytes.toBytes("rrr"), Bytes.toBytes("sss"), Bytes.toBytes("ttt"),
2598    Bytes.toBytes("uuu"), Bytes.toBytes("vvv"), Bytes.toBytes("www"),
2599    Bytes.toBytes("xxx"), Bytes.toBytes("yyy")
2600  };
2601
2602  public static final byte[][] KEYS_FOR_HBA_CREATE_TABLE = {
2603      Bytes.toBytes("bbb"),
2604      Bytes.toBytes("ccc"), Bytes.toBytes("ddd"), Bytes.toBytes("eee"),
2605      Bytes.toBytes("fff"), Bytes.toBytes("ggg"), Bytes.toBytes("hhh"),
2606      Bytes.toBytes("iii"), Bytes.toBytes("jjj"), Bytes.toBytes("kkk"),
2607      Bytes.toBytes("lll"), Bytes.toBytes("mmm"), Bytes.toBytes("nnn"),
2608      Bytes.toBytes("ooo"), Bytes.toBytes("ppp"), Bytes.toBytes("qqq"),
2609      Bytes.toBytes("rrr"), Bytes.toBytes("sss"), Bytes.toBytes("ttt"),
2610      Bytes.toBytes("uuu"), Bytes.toBytes("vvv"), Bytes.toBytes("www"),
2611      Bytes.toBytes("xxx"), Bytes.toBytes("yyy"), Bytes.toBytes("zzz")
2612  };
2613
2614  /**
2615   * Create rows in hbase:meta for regions of the specified table with the specified
2616   * start keys.  The first startKey should be a 0 length byte array if you
2617   * want to form a proper range of regions.
2618   * @param conf
2619   * @param htd
2620   * @param startKeys
2621   * @return list of region info for regions added to meta
2622   * @throws IOException
2623   * @deprecated since 2.0 version and will be removed in 3.0 version.
2624   *             use {@link #createMultiRegionsInMeta(Configuration, TableDescriptor, byte[][])}
2625   */
2626  @Deprecated
2627  public List<HRegionInfo> createMultiRegionsInMeta(final Configuration conf,
2628      final HTableDescriptor htd, byte [][] startKeys) throws IOException {
2629    return createMultiRegionsInMeta(conf, (TableDescriptor) htd, startKeys)
2630        .stream().map(ImmutableHRegionInfo::new).collect(Collectors.toList());
2631  }
2632  /**
2633   * Create rows in hbase:meta for regions of the specified table with the specified
2634   * start keys.  The first startKey should be a 0 length byte array if you
2635   * want to form a proper range of regions.
2636   * @param conf
2637   * @param htd
2638   * @param startKeys
2639   * @return list of region info for regions added to meta
2640   * @throws IOException
2641   */
2642  public List<RegionInfo> createMultiRegionsInMeta(final Configuration conf,
2643      final TableDescriptor htd, byte [][] startKeys)
2644  throws IOException {
2645    Table meta = getConnection().getTable(TableName.META_TABLE_NAME);
2646    Arrays.sort(startKeys, Bytes.BYTES_COMPARATOR);
2647    List<RegionInfo> newRegions = new ArrayList<>(startKeys.length);
2648    MetaTableAccessor
2649        .updateTableState(getConnection(), htd.getTableName(), TableState.State.ENABLED);
2650    // add custom ones
2651    for (int i = 0; i < startKeys.length; i++) {
2652      int j = (i + 1) % startKeys.length;
2653      RegionInfo hri = RegionInfoBuilder.newBuilder(htd.getTableName())
2654          .setStartKey(startKeys[i])
2655          .setEndKey(startKeys[j])
2656          .build();
2657      MetaTableAccessor.addRegionToMeta(getConnection(), hri);
2658      newRegions.add(hri);
2659    }
2660
2661    meta.close();
2662    return newRegions;
2663  }
2664
2665  /**
2666   * Create an unmanaged WAL. Be sure to close it when you're through.
2667   */
2668  public static WAL createWal(final Configuration conf, final Path rootDir, final RegionInfo hri)
2669      throws IOException {
2670    // The WAL subsystem will use the default rootDir rather than the passed in rootDir
2671    // unless I pass along via the conf.
2672    Configuration confForWAL = new Configuration(conf);
2673    confForWAL.set(HConstants.HBASE_DIR, rootDir.toString());
2674    return new WALFactory(confForWAL, "hregion-" + RandomStringUtils.randomNumeric(8)).getWAL(hri);
2675  }
2676
2677
2678  /**
2679   * Create a region with it's own WAL. Be sure to call
2680   * {@link HBaseTestingUtility#closeRegionAndWAL(HRegion)} to clean up all resources.
2681   */
2682  public static HRegion createRegionAndWAL(final RegionInfo info, final Path rootDir,
2683      final Configuration conf, final TableDescriptor htd) throws IOException {
2684    return createRegionAndWAL(info, rootDir, conf, htd, true);
2685  }
2686
2687  /**
2688   * Create a region with it's own WAL. Be sure to call
2689   * {@link HBaseTestingUtility#closeRegionAndWAL(HRegion)} to clean up all resources.
2690   */
2691  public static HRegion createRegionAndWAL(final RegionInfo info, final Path rootDir,
2692      final Configuration conf, final TableDescriptor htd, BlockCache blockCache)
2693      throws IOException {
2694    HRegion region = createRegionAndWAL(info, rootDir, conf, htd, false);
2695    region.setBlockCache(blockCache);
2696    region.initialize();
2697    return region;
2698  }
2699  /**
2700   * Create a region with it's own WAL. Be sure to call
2701   * {@link HBaseTestingUtility#closeRegionAndWAL(HRegion)} to clean up all resources.
2702   */
2703  public static HRegion createRegionAndWAL(final RegionInfo info, final Path rootDir,
2704      final Configuration conf, final TableDescriptor htd, MobFileCache mobFileCache)
2705      throws IOException {
2706    HRegion region = createRegionAndWAL(info, rootDir, conf, htd, false);
2707    region.setMobFileCache(mobFileCache);
2708    region.initialize();
2709    return region;
2710  }
2711
2712  /**
2713   * Create a region with it's own WAL. Be sure to call
2714   * {@link HBaseTestingUtility#closeRegionAndWAL(HRegion)} to clean up all resources.
2715   */
2716  public static HRegion createRegionAndWAL(final RegionInfo info, final Path rootDir,
2717      final Configuration conf, final TableDescriptor htd, boolean initialize)
2718      throws IOException {
2719    ChunkCreator.initialize(MemStoreLABImpl.CHUNK_SIZE_DEFAULT, false, 0, 0, 0, null);
2720    WAL wal = createWal(conf, rootDir, info);
2721    return HRegion.createHRegion(info, rootDir, conf, htd, wal, initialize);
2722  }
2723
2724  /**
2725   * Returns all rows from the hbase:meta table.
2726   *
2727   * @throws IOException When reading the rows fails.
2728   */
2729  public List<byte[]> getMetaTableRows() throws IOException {
2730    // TODO: Redo using MetaTableAccessor class
2731    Table t = getConnection().getTable(TableName.META_TABLE_NAME);
2732    List<byte[]> rows = new ArrayList<>();
2733    ResultScanner s = t.getScanner(new Scan());
2734    for (Result result : s) {
2735      LOG.info("getMetaTableRows: row -> " +
2736        Bytes.toStringBinary(result.getRow()));
2737      rows.add(result.getRow());
2738    }
2739    s.close();
2740    t.close();
2741    return rows;
2742  }
2743
2744  /**
2745   * Returns all rows from the hbase:meta table for a given user table
2746   *
2747   * @throws IOException When reading the rows fails.
2748   */
2749  public List<byte[]> getMetaTableRows(TableName tableName) throws IOException {
2750    // TODO: Redo using MetaTableAccessor.
2751    Table t = getConnection().getTable(TableName.META_TABLE_NAME);
2752    List<byte[]> rows = new ArrayList<>();
2753    ResultScanner s = t.getScanner(new Scan());
2754    for (Result result : s) {
2755      RegionInfo info = MetaTableAccessor.getRegionInfo(result);
2756      if (info == null) {
2757        LOG.error("No region info for row " + Bytes.toString(result.getRow()));
2758        // TODO figure out what to do for this new hosed case.
2759        continue;
2760      }
2761
2762      if (info.getTable().equals(tableName)) {
2763        LOG.info("getMetaTableRows: row -> " +
2764            Bytes.toStringBinary(result.getRow()) + info);
2765        rows.add(result.getRow());
2766      }
2767    }
2768    s.close();
2769    t.close();
2770    return rows;
2771  }
2772
2773  /**
2774   * Returns all regions of the specified table
2775   *
2776   * @param tableName the table name
2777   * @return all regions of the specified table
2778   * @throws IOException when getting the regions fails.
2779   */
2780  private List<RegionInfo> getRegions(TableName tableName) throws IOException {
2781    try (Admin admin = getConnection().getAdmin()) {
2782      return admin.getRegions(tableName);
2783    }
2784  }
2785
2786  /*
2787   * Find any other region server which is different from the one identified by parameter
2788   * @param rs
2789   * @return another region server
2790   */
2791  public HRegionServer getOtherRegionServer(HRegionServer rs) {
2792    for (JVMClusterUtil.RegionServerThread rst :
2793      getMiniHBaseCluster().getRegionServerThreads()) {
2794      if (!(rst.getRegionServer() == rs)) {
2795        return rst.getRegionServer();
2796      }
2797    }
2798    return null;
2799  }
2800
2801  /**
2802   * Tool to get the reference to the region server object that holds the
2803   * region of the specified user table.
2804   * @param tableName user table to lookup in hbase:meta
2805   * @return region server that holds it, null if the row doesn't exist
2806   * @throws IOException
2807   * @throws InterruptedException
2808   */
2809  public HRegionServer getRSForFirstRegionInTable(TableName tableName)
2810      throws IOException, InterruptedException {
2811    List<RegionInfo> regions = getRegions(tableName);
2812    if (regions == null || regions.isEmpty()) {
2813      return null;
2814    }
2815    LOG.debug("Found " + regions.size() + " regions for table " +
2816        tableName);
2817
2818    byte[] firstRegionName = regions.stream()
2819        .filter(r -> !r.isOffline())
2820        .map(RegionInfo::getRegionName)
2821        .findFirst()
2822        .orElseThrow(() -> new IOException("online regions not found in table " + tableName));
2823
2824    LOG.debug("firstRegionName=" + Bytes.toString(firstRegionName));
2825    long pause = getConfiguration().getLong(HConstants.HBASE_CLIENT_PAUSE,
2826      HConstants.DEFAULT_HBASE_CLIENT_PAUSE);
2827    int numRetries = getConfiguration().getInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER,
2828      HConstants.DEFAULT_HBASE_CLIENT_RETRIES_NUMBER);
2829    RetryCounter retrier = new RetryCounter(numRetries+1, (int)pause, TimeUnit.MICROSECONDS);
2830    while(retrier.shouldRetry()) {
2831      int index = getMiniHBaseCluster().getServerWith(firstRegionName);
2832      if (index != -1) {
2833        return getMiniHBaseCluster().getRegionServerThreads().get(index).getRegionServer();
2834      }
2835      // Came back -1.  Region may not be online yet.  Sleep a while.
2836      retrier.sleepUntilNextRetry();
2837    }
2838    return null;
2839  }
2840
2841  /**
2842   * Starts a <code>MiniMRCluster</code> with a default number of
2843   * <code>TaskTracker</code>'s.
2844   *
2845   * @throws IOException When starting the cluster fails.
2846   */
2847  public MiniMRCluster startMiniMapReduceCluster() throws IOException {
2848    // Set a very high max-disk-utilization percentage to avoid the NodeManagers from failing.
2849    conf.setIfUnset(
2850        "yarn.nodemanager.disk-health-checker.max-disk-utilization-per-disk-percentage",
2851        "99.0");
2852    startMiniMapReduceCluster(2);
2853    return mrCluster;
2854  }
2855
2856  /**
2857   * Tasktracker has a bug where changing the hadoop.log.dir system property
2858   * will not change its internal static LOG_DIR variable.
2859   */
2860  private void forceChangeTaskLogDir() {
2861    Field logDirField;
2862    try {
2863      logDirField = TaskLog.class.getDeclaredField("LOG_DIR");
2864      logDirField.setAccessible(true);
2865
2866      Field modifiersField = Field.class.getDeclaredField("modifiers");
2867      modifiersField.setAccessible(true);
2868      modifiersField.setInt(logDirField, logDirField.getModifiers() & ~Modifier.FINAL);
2869
2870      logDirField.set(null, new File(hadoopLogDir, "userlogs"));
2871    } catch (SecurityException e) {
2872      throw new RuntimeException(e);
2873    } catch (NoSuchFieldException e) {
2874      // TODO Auto-generated catch block
2875      throw new RuntimeException(e);
2876    } catch (IllegalArgumentException e) {
2877      throw new RuntimeException(e);
2878    } catch (IllegalAccessException e) {
2879      throw new RuntimeException(e);
2880    }
2881  }
2882
2883  /**
2884   * Starts a <code>MiniMRCluster</code>. Call {@link #setFileSystemURI(String)} to use a different
2885   * filesystem.
2886   * @param servers  The number of <code>TaskTracker</code>'s to start.
2887   * @throws IOException When starting the cluster fails.
2888   */
2889  private void startMiniMapReduceCluster(final int servers) throws IOException {
2890    if (mrCluster != null) {
2891      throw new IllegalStateException("MiniMRCluster is already running");
2892    }
2893    LOG.info("Starting mini mapreduce cluster...");
2894    setupClusterTestDir();
2895    createDirsAndSetProperties();
2896
2897    forceChangeTaskLogDir();
2898
2899    //// hadoop2 specific settings
2900    // Tests were failing because this process used 6GB of virtual memory and was getting killed.
2901    // we up the VM usable so that processes don't get killed.
2902    conf.setFloat("yarn.nodemanager.vmem-pmem-ratio", 8.0f);
2903
2904    // Tests were failing due to MAPREDUCE-4880 / MAPREDUCE-4607 against hadoop 2.0.2-alpha and
2905    // this avoids the problem by disabling speculative task execution in tests.
2906    conf.setBoolean("mapreduce.map.speculative", false);
2907    conf.setBoolean("mapreduce.reduce.speculative", false);
2908    ////
2909
2910    // Allow the user to override FS URI for this map-reduce cluster to use.
2911    mrCluster = new MiniMRCluster(servers,
2912      FS_URI != null ? FS_URI : FileSystem.get(conf).getUri().toString(), 1,
2913      null, null, new JobConf(this.conf));
2914    JobConf jobConf = MapreduceTestingShim.getJobConf(mrCluster);
2915    if (jobConf == null) {
2916      jobConf = mrCluster.createJobConf();
2917    }
2918
2919    jobConf.set("mapreduce.cluster.local.dir",
2920      conf.get("mapreduce.cluster.local.dir")); //Hadoop MiniMR overwrites this while it should not
2921    LOG.info("Mini mapreduce cluster started");
2922
2923    // In hadoop2, YARN/MR2 starts a mini cluster with its own conf instance and updates settings.
2924    // Our HBase MR jobs need several of these settings in order to properly run.  So we copy the
2925    // necessary config properties here.  YARN-129 required adding a few properties.
2926    conf.set("mapreduce.jobtracker.address", jobConf.get("mapreduce.jobtracker.address"));
2927    // this for mrv2 support; mr1 ignores this
2928    conf.set("mapreduce.framework.name", "yarn");
2929    conf.setBoolean("yarn.is.minicluster", true);
2930    String rmAddress = jobConf.get("yarn.resourcemanager.address");
2931    if (rmAddress != null) {
2932      conf.set("yarn.resourcemanager.address", rmAddress);
2933    }
2934    String historyAddress = jobConf.get("mapreduce.jobhistory.address");
2935    if (historyAddress != null) {
2936      conf.set("mapreduce.jobhistory.address", historyAddress);
2937    }
2938    String schedulerAddress =
2939      jobConf.get("yarn.resourcemanager.scheduler.address");
2940    if (schedulerAddress != null) {
2941      conf.set("yarn.resourcemanager.scheduler.address", schedulerAddress);
2942    }
2943    String mrJobHistoryWebappAddress =
2944      jobConf.get("mapreduce.jobhistory.webapp.address");
2945    if (mrJobHistoryWebappAddress != null) {
2946      conf.set("mapreduce.jobhistory.webapp.address", mrJobHistoryWebappAddress);
2947    }
2948    String yarnRMWebappAddress =
2949      jobConf.get("yarn.resourcemanager.webapp.address");
2950    if (yarnRMWebappAddress != null) {
2951      conf.set("yarn.resourcemanager.webapp.address", yarnRMWebappAddress);
2952    }
2953  }
2954
2955  /**
2956   * Stops the previously started <code>MiniMRCluster</code>.
2957   */
2958  public void shutdownMiniMapReduceCluster() {
2959    if (mrCluster != null) {
2960      LOG.info("Stopping mini mapreduce cluster...");
2961      mrCluster.shutdown();
2962      mrCluster = null;
2963      LOG.info("Mini mapreduce cluster stopped");
2964    }
2965    // Restore configuration to point to local jobtracker
2966    conf.set("mapreduce.jobtracker.address", "local");
2967  }
2968
2969  /**
2970   * Create a stubbed out RegionServerService, mainly for getting FS.
2971   */
2972  public RegionServerServices createMockRegionServerService() throws IOException {
2973    return createMockRegionServerService((ServerName)null);
2974  }
2975
2976  /**
2977   * Create a stubbed out RegionServerService, mainly for getting FS.
2978   * This version is used by TestTokenAuthentication
2979   */
2980  public RegionServerServices createMockRegionServerService(RpcServerInterface rpc) throws
2981      IOException {
2982    final MockRegionServerServices rss = new MockRegionServerServices(getZooKeeperWatcher());
2983    rss.setFileSystem(getTestFileSystem());
2984    rss.setRpcServer(rpc);
2985    return rss;
2986  }
2987
2988  /**
2989   * Create a stubbed out RegionServerService, mainly for getting FS.
2990   * This version is used by TestOpenRegionHandler
2991   */
2992  public RegionServerServices createMockRegionServerService(ServerName name) throws IOException {
2993    final MockRegionServerServices rss = new MockRegionServerServices(getZooKeeperWatcher(), name);
2994    rss.setFileSystem(getTestFileSystem());
2995    return rss;
2996  }
2997
2998  /**
2999   * Switches the logger for the given class to DEBUG level.
3000   * @param clazz The class for which to switch to debug logging.
3001   * @deprecated In 2.3.0, will be removed in 4.0.0. Only support changing log level on log4j now as
3002   *             HBase only uses log4j. You should do this by your own as it you know which log
3003   *             framework you are using then set the log level to debug is very easy.
3004   */
3005  @Deprecated
3006  public void enableDebug(Class<?> clazz) {
3007    Log4jUtils.enableDebug(clazz);
3008  }
3009
3010  /**
3011   * Expire the Master's session
3012   * @throws Exception
3013   */
3014  public void expireMasterSession() throws Exception {
3015    HMaster master = getMiniHBaseCluster().getMaster();
3016    expireSession(master.getZooKeeper(), false);
3017  }
3018
3019  /**
3020   * Expire a region server's session
3021   * @param index which RS
3022   */
3023  public void expireRegionServerSession(int index) throws Exception {
3024    HRegionServer rs = getMiniHBaseCluster().getRegionServer(index);
3025    expireSession(rs.getZooKeeper(), false);
3026    decrementMinRegionServerCount();
3027  }
3028
3029  private void decrementMinRegionServerCount() {
3030    // decrement the count for this.conf, for newly spwaned master
3031    // this.hbaseCluster shares this configuration too
3032    decrementMinRegionServerCount(getConfiguration());
3033
3034    // each master thread keeps a copy of configuration
3035    for (MasterThread master : getHBaseCluster().getMasterThreads()) {
3036      decrementMinRegionServerCount(master.getMaster().getConfiguration());
3037    }
3038  }
3039
3040  private void decrementMinRegionServerCount(Configuration conf) {
3041    int currentCount = conf.getInt(
3042        ServerManager.WAIT_ON_REGIONSERVERS_MINTOSTART, -1);
3043    if (currentCount != -1) {
3044      conf.setInt(ServerManager.WAIT_ON_REGIONSERVERS_MINTOSTART,
3045          Math.max(currentCount - 1, 1));
3046    }
3047  }
3048
3049  public void expireSession(ZKWatcher nodeZK) throws Exception {
3050   expireSession(nodeZK, false);
3051  }
3052
3053  /**
3054   * Expire a ZooKeeper session as recommended in ZooKeeper documentation
3055   * http://hbase.apache.org/book.html#trouble.zookeeper
3056   * There are issues when doing this:
3057   * [1] http://www.mail-archive.com/dev@zookeeper.apache.org/msg01942.html
3058   * [2] https://issues.apache.org/jira/browse/ZOOKEEPER-1105
3059   *
3060   * @param nodeZK - the ZK watcher to expire
3061   * @param checkStatus - true to check if we can create a Table with the
3062   *                    current configuration.
3063   */
3064  public void expireSession(ZKWatcher nodeZK, boolean checkStatus)
3065    throws Exception {
3066    Configuration c = new Configuration(this.conf);
3067    String quorumServers = ZKConfig.getZKQuorumServersString(c);
3068    ZooKeeper zk = nodeZK.getRecoverableZooKeeper().getZooKeeper();
3069    byte[] password = zk.getSessionPasswd();
3070    long sessionID = zk.getSessionId();
3071
3072    // Expiry seems to be asynchronous (see comment from P. Hunt in [1]),
3073    //  so we create a first watcher to be sure that the
3074    //  event was sent. We expect that if our watcher receives the event
3075    //  other watchers on the same machine will get is as well.
3076    // When we ask to close the connection, ZK does not close it before
3077    //  we receive all the events, so don't have to capture the event, just
3078    //  closing the connection should be enough.
3079    ZooKeeper monitor = new ZooKeeper(quorumServers,
3080      1000, new org.apache.zookeeper.Watcher(){
3081      @Override
3082      public void process(WatchedEvent watchedEvent) {
3083        LOG.info("Monitor ZKW received event="+watchedEvent);
3084      }
3085    } , sessionID, password);
3086
3087    // Making it expire
3088    ZooKeeper newZK = new ZooKeeper(quorumServers,
3089        1000, EmptyWatcher.instance, sessionID, password);
3090
3091    //ensure that we have connection to the server before closing down, otherwise
3092    //the close session event will be eaten out before we start CONNECTING state
3093    long start = System.currentTimeMillis();
3094    while (newZK.getState() != States.CONNECTED
3095         && System.currentTimeMillis() - start < 1000) {
3096       Thread.sleep(1);
3097    }
3098    newZK.close();
3099    LOG.info("ZK Closed Session 0x" + Long.toHexString(sessionID));
3100
3101    // Now closing & waiting to be sure that the clients get it.
3102    monitor.close();
3103
3104    if (checkStatus) {
3105      getConnection().getTable(TableName.META_TABLE_NAME).close();
3106    }
3107  }
3108
3109  /**
3110   * Get the Mini HBase cluster.
3111   *
3112   * @return hbase cluster
3113   * @see #getHBaseClusterInterface()
3114   */
3115  public MiniHBaseCluster getHBaseCluster() {
3116    return getMiniHBaseCluster();
3117  }
3118
3119  /**
3120   * Returns the HBaseCluster instance.
3121   * <p>Returned object can be any of the subclasses of HBaseCluster, and the
3122   * tests referring this should not assume that the cluster is a mini cluster or a
3123   * distributed one. If the test only works on a mini cluster, then specific
3124   * method {@link #getMiniHBaseCluster()} can be used instead w/o the
3125   * need to type-cast.
3126   */
3127  public HBaseCluster getHBaseClusterInterface() {
3128    //implementation note: we should rename this method as #getHBaseCluster(),
3129    //but this would require refactoring 90+ calls.
3130    return hbaseCluster;
3131  }
3132
3133  public void closeConnection() throws IOException {
3134    Closeables.close(hbaseAdmin, true);
3135    Closeables.close(connection, true);
3136    this.hbaseAdmin = null;
3137    this.connection = null;
3138  }
3139
3140  /**
3141   * Resets the connections so that the next time getConnection() is called, a new connection is
3142   * created. This is needed in cases where the entire cluster / all the masters are shutdown and
3143   * the connection is not valid anymore.
3144   * TODO: There should be a more coherent way of doing this. Unfortunately the way tests are
3145   *   written, not all start() stop() calls go through this class. Most tests directly operate on
3146   *   the underlying mini/local hbase cluster. That makes it difficult for this wrapper class to
3147   *   maintain the connection state automatically. Cleaning this is a much bigger refactor.
3148   */
3149  public void invalidateConnection() throws IOException {
3150    closeConnection();
3151    // Update the master addresses if they changed.
3152    final String masterConfigBefore = conf.get(HConstants.MASTER_ADDRS_KEY);
3153    final String masterConfAfter = getMiniHBaseCluster().conf.get(HConstants.MASTER_ADDRS_KEY);
3154    LOG.info("Invalidated connection. Updating master addresses before: {} after: {}",
3155        masterConfigBefore, masterConfAfter);
3156    conf.set(HConstants.MASTER_ADDRS_KEY,
3157        getMiniHBaseCluster().conf.get(HConstants.MASTER_ADDRS_KEY));
3158  }
3159
3160  /**
3161   * @return A Connection that can be shared. Don't close. Will be closed on shutdown of cluster.
3162   * @throws IOException
3163   */
3164  public Connection getConnection() throws IOException {
3165    if (this.connection == null) {
3166      this.connection = ConnectionFactory.createConnection(this.conf);
3167    }
3168    return this.connection;
3169  }
3170
3171  /**
3172   * Returns a Admin instance.
3173   * This instance is shared between HBaseTestingUtility instance users. Closing it has no effect,
3174   * it will be closed automatically when the cluster shutdowns
3175   *
3176   * @return HBaseAdmin instance which is guaranteed to support only {@link Admin} interface.
3177   *   Functions in HBaseAdmin not provided by {@link Admin} interface can be changed/deleted
3178   *   anytime.
3179   * @deprecated Since 2.0. Will be removed in 3.0. Use {@link #getAdmin()} instead.
3180   */
3181  @Deprecated
3182  public synchronized HBaseAdmin getHBaseAdmin()
3183  throws IOException {
3184    if (hbaseAdmin == null){
3185      this.hbaseAdmin = (HBaseAdmin) getConnection().getAdmin();
3186    }
3187    return hbaseAdmin;
3188  }
3189
3190  /**
3191   * Returns an Admin instance which is shared between HBaseTestingUtility instance users.
3192   * Closing it has no effect, it will be closed automatically when the cluster shutdowns
3193   */
3194  public synchronized Admin getAdmin() throws IOException {
3195    if (hbaseAdmin == null){
3196      this.hbaseAdmin = (HBaseAdmin) getConnection().getAdmin();
3197    }
3198    return hbaseAdmin;
3199  }
3200
3201  private HBaseAdmin hbaseAdmin = null;
3202
3203  /**
3204   * Returns an {@link Hbck} instance. Needs be closed when done.
3205   */
3206  public Hbck getHbck() throws IOException {
3207    return getConnection().getHbck();
3208  }
3209
3210  /**
3211   * Unassign the named region.
3212   *
3213   * @param regionName  The region to unassign.
3214   */
3215  public void unassignRegion(String regionName) throws IOException {
3216    unassignRegion(Bytes.toBytes(regionName));
3217  }
3218
3219  /**
3220   * Unassign the named region.
3221   *
3222   * @param regionName  The region to unassign.
3223   */
3224  public void unassignRegion(byte[] regionName) throws IOException {
3225    getAdmin().unassign(regionName, true);
3226  }
3227
3228  /**
3229   * Closes the region containing the given row.
3230   *
3231   * @param row  The row to find the containing region.
3232   * @param table  The table to find the region.
3233   */
3234  public void unassignRegionByRow(String row, RegionLocator table) throws IOException {
3235    unassignRegionByRow(Bytes.toBytes(row), table);
3236  }
3237
3238  /**
3239   * Closes the region containing the given row.
3240   *
3241   * @param row  The row to find the containing region.
3242   * @param table  The table to find the region.
3243   * @throws IOException
3244   */
3245  public void unassignRegionByRow(byte[] row, RegionLocator table) throws IOException {
3246    HRegionLocation hrl = table.getRegionLocation(row);
3247    unassignRegion(hrl.getRegionInfo().getRegionName());
3248  }
3249
3250  /*
3251   * Retrieves a splittable region randomly from tableName
3252   *
3253   * @param tableName name of table
3254   * @param maxAttempts maximum number of attempts, unlimited for value of -1
3255   * @return the HRegion chosen, null if none was found within limit of maxAttempts
3256   */
3257  public HRegion getSplittableRegion(TableName tableName, int maxAttempts) {
3258    List<HRegion> regions = getHBaseCluster().getRegions(tableName);
3259    int regCount = regions.size();
3260    Set<Integer> attempted = new HashSet<>();
3261    int idx;
3262    int attempts = 0;
3263    do {
3264      regions = getHBaseCluster().getRegions(tableName);
3265      if (regCount != regions.size()) {
3266        // if there was region movement, clear attempted Set
3267        attempted.clear();
3268      }
3269      regCount = regions.size();
3270      // There are chances that before we get the region for the table from an RS the region may
3271      // be going for CLOSE.  This may be because online schema change is enabled
3272      if (regCount > 0) {
3273        idx = random.nextInt(regCount);
3274        // if we have just tried this region, there is no need to try again
3275        if (attempted.contains(idx))
3276          continue;
3277        try {
3278          regions.get(idx).checkSplit();
3279          return regions.get(idx);
3280        } catch (Exception ex) {
3281          LOG.warn("Caught exception", ex);
3282          attempted.add(idx);
3283        }
3284      }
3285      attempts++;
3286    } while (maxAttempts == -1 || attempts < maxAttempts);
3287    return null;
3288  }
3289
3290  public MiniDFSCluster getDFSCluster() {
3291    return dfsCluster;
3292  }
3293
3294  public void setDFSCluster(MiniDFSCluster cluster) throws IllegalStateException, IOException {
3295    setDFSCluster(cluster, true);
3296  }
3297
3298  /**
3299   * Set the MiniDFSCluster
3300   * @param cluster cluster to use
3301   * @param requireDown require the that cluster not be "up" (MiniDFSCluster#isClusterUp) before
3302   * it is set.
3303   * @throws IllegalStateException if the passed cluster is up when it is required to be down
3304   * @throws IOException if the FileSystem could not be set from the passed dfs cluster
3305   */
3306  public void setDFSCluster(MiniDFSCluster cluster, boolean requireDown)
3307      throws IllegalStateException, IOException {
3308    if (dfsCluster != null && requireDown && dfsCluster.isClusterUp()) {
3309      throw new IllegalStateException("DFSCluster is already running! Shut it down first.");
3310    }
3311    this.dfsCluster = cluster;
3312    this.setFs();
3313  }
3314
3315  public FileSystem getTestFileSystem() throws IOException {
3316    return HFileSystem.get(conf);
3317  }
3318
3319  /**
3320   * Wait until all regions in a table have been assigned.  Waits default timeout before giving up
3321   * (30 seconds).
3322   * @param table Table to wait on.
3323   * @throws InterruptedException
3324   * @throws IOException
3325   */
3326  public void waitTableAvailable(TableName table)
3327      throws InterruptedException, IOException {
3328    waitTableAvailable(table.getName(), 30000);
3329  }
3330
3331  public void waitTableAvailable(TableName table, long timeoutMillis)
3332      throws InterruptedException, IOException {
3333    waitFor(timeoutMillis, predicateTableAvailable(table));
3334  }
3335
3336  /**
3337   * Wait until all regions in a table have been assigned
3338   * @param table Table to wait on.
3339   * @param timeoutMillis Timeout.
3340   * @throws InterruptedException
3341   * @throws IOException
3342   */
3343  public void waitTableAvailable(byte[] table, long timeoutMillis)
3344  throws InterruptedException, IOException {
3345    waitFor(timeoutMillis, predicateTableAvailable(TableName.valueOf(table)));
3346  }
3347
3348  public String explainTableAvailability(TableName tableName) throws IOException {
3349    String msg = explainTableState(tableName, TableState.State.ENABLED) + ", ";
3350    if (getHBaseCluster().getMaster().isAlive()) {
3351      Map<RegionInfo, ServerName> assignments =
3352          getHBaseCluster().getMaster().getAssignmentManager().getRegionStates()
3353              .getRegionAssignments();
3354      final List<Pair<RegionInfo, ServerName>> metaLocations =
3355          MetaTableAccessor.getTableRegionsAndLocations(connection, tableName);
3356      for (Pair<RegionInfo, ServerName> metaLocation : metaLocations) {
3357        RegionInfo hri = metaLocation.getFirst();
3358        ServerName sn = metaLocation.getSecond();
3359        if (!assignments.containsKey(hri)) {
3360          msg += ", region " + hri
3361              + " not assigned, but found in meta, it expected to be on " + sn;
3362
3363        } else if (sn == null) {
3364          msg += ",  region " + hri
3365              + " assigned,  but has no server in meta";
3366        } else if (!sn.equals(assignments.get(hri))) {
3367          msg += ",  region " + hri
3368              + " assigned,  but has different servers in meta and AM ( " +
3369              sn + " <> " + assignments.get(hri);
3370        }
3371      }
3372    }
3373    return msg;
3374  }
3375
3376  public String explainTableState(final TableName table, TableState.State state)
3377      throws IOException {
3378    TableState tableState = MetaTableAccessor.getTableState(connection, table);
3379    if (tableState == null) {
3380      return "TableState in META: No table state in META for table " + table
3381          + " last state in meta (including deleted is " + findLastTableState(table) + ")";
3382    } else if (!tableState.inStates(state)) {
3383      return "TableState in META: Not " + state + " state, but " + tableState;
3384    } else {
3385      return "TableState in META: OK";
3386    }
3387  }
3388
3389  @Nullable
3390  public TableState findLastTableState(final TableName table) throws IOException {
3391    final AtomicReference<TableState> lastTableState = new AtomicReference<>(null);
3392    MetaTableAccessor.Visitor visitor = new MetaTableAccessor.Visitor() {
3393      @Override
3394      public boolean visit(Result r) throws IOException {
3395        if (!Arrays.equals(r.getRow(), table.getName()))
3396          return false;
3397        TableState state = MetaTableAccessor.getTableState(r);
3398        if (state != null)
3399          lastTableState.set(state);
3400        return true;
3401      }
3402    };
3403    MetaTableAccessor
3404        .scanMeta(connection, null, null,
3405            MetaTableAccessor.QueryType.TABLE,
3406            Integer.MAX_VALUE, visitor);
3407    return lastTableState.get();
3408  }
3409
3410  /**
3411   * Waits for a table to be 'enabled'.  Enabled means that table is set as 'enabled' and the
3412   * regions have been all assigned.  Will timeout after default period (30 seconds)
3413   * Tolerates nonexistent table.
3414   * @param table the table to wait on.
3415   * @throws InterruptedException if interrupted while waiting
3416   * @throws IOException if an IO problem is encountered
3417   */
3418  public void waitTableEnabled(TableName table)
3419      throws InterruptedException, IOException {
3420    waitTableEnabled(table, 30000);
3421  }
3422
3423  /**
3424   * Waits for a table to be 'enabled'.  Enabled means that table is set as 'enabled' and the
3425   * regions have been all assigned.
3426   * @see #waitTableEnabled(TableName, long)
3427   * @param table Table to wait on.
3428   * @param timeoutMillis Time to wait on it being marked enabled.
3429   * @throws InterruptedException
3430   * @throws IOException
3431   */
3432  public void waitTableEnabled(byte[] table, long timeoutMillis)
3433  throws InterruptedException, IOException {
3434    waitTableEnabled(TableName.valueOf(table), timeoutMillis);
3435  }
3436
3437  public void waitTableEnabled(TableName table, long timeoutMillis)
3438  throws IOException {
3439    waitFor(timeoutMillis, predicateTableEnabled(table));
3440  }
3441
3442  /**
3443   * Waits for a table to be 'disabled'.  Disabled means that table is set as 'disabled'
3444   * Will timeout after default period (30 seconds)
3445   * @param table Table to wait on.
3446   * @throws InterruptedException
3447   * @throws IOException
3448   */
3449  public void waitTableDisabled(byte[] table)
3450          throws InterruptedException, IOException {
3451    waitTableDisabled(table, 30000);
3452  }
3453
3454  public void waitTableDisabled(TableName table, long millisTimeout)
3455          throws InterruptedException, IOException {
3456    waitFor(millisTimeout, predicateTableDisabled(table));
3457  }
3458
3459  /**
3460   * Waits for a table to be 'disabled'.  Disabled means that table is set as 'disabled'
3461   * @param table Table to wait on.
3462   * @param timeoutMillis Time to wait on it being marked disabled.
3463   * @throws InterruptedException
3464   * @throws IOException
3465   */
3466  public void waitTableDisabled(byte[] table, long timeoutMillis)
3467          throws InterruptedException, IOException {
3468    waitTableDisabled(TableName.valueOf(table), timeoutMillis);
3469  }
3470
3471  /**
3472   * Make sure that at least the specified number of region servers
3473   * are running
3474   * @param num minimum number of region servers that should be running
3475   * @return true if we started some servers
3476   * @throws IOException
3477   */
3478  public boolean ensureSomeRegionServersAvailable(final int num)
3479      throws IOException {
3480    boolean startedServer = false;
3481    MiniHBaseCluster hbaseCluster = getMiniHBaseCluster();
3482    for (int i=hbaseCluster.getLiveRegionServerThreads().size(); i<num; ++i) {
3483      LOG.info("Started new server=" + hbaseCluster.startRegionServer());
3484      startedServer = true;
3485    }
3486
3487    return startedServer;
3488  }
3489
3490
3491  /**
3492   * Make sure that at least the specified number of region servers
3493   * are running. We don't count the ones that are currently stopping or are
3494   * stopped.
3495   * @param num minimum number of region servers that should be running
3496   * @return true if we started some servers
3497   * @throws IOException
3498   */
3499  public boolean ensureSomeNonStoppedRegionServersAvailable(final int num)
3500    throws IOException {
3501    boolean startedServer = ensureSomeRegionServersAvailable(num);
3502
3503    int nonStoppedServers = 0;
3504    for (JVMClusterUtil.RegionServerThread rst :
3505      getMiniHBaseCluster().getRegionServerThreads()) {
3506
3507      HRegionServer hrs = rst.getRegionServer();
3508      if (hrs.isStopping() || hrs.isStopped()) {
3509        LOG.info("A region server is stopped or stopping:"+hrs);
3510      } else {
3511        nonStoppedServers++;
3512      }
3513    }
3514    for (int i=nonStoppedServers; i<num; ++i) {
3515      LOG.info("Started new server=" + getMiniHBaseCluster().startRegionServer());
3516      startedServer = true;
3517    }
3518    return startedServer;
3519  }
3520
3521
3522  /**
3523   * This method clones the passed <code>c</code> configuration setting a new
3524   * user into the clone.  Use it getting new instances of FileSystem.  Only
3525   * works for DistributedFileSystem w/o Kerberos.
3526   * @param c Initial configuration
3527   * @param differentiatingSuffix Suffix to differentiate this user from others.
3528   * @return A new configuration instance with a different user set into it.
3529   * @throws IOException
3530   */
3531  public static User getDifferentUser(final Configuration c,
3532    final String differentiatingSuffix)
3533  throws IOException {
3534    FileSystem currentfs = FileSystem.get(c);
3535    if (!(currentfs instanceof DistributedFileSystem) || User.isHBaseSecurityEnabled(c)) {
3536      return User.getCurrent();
3537    }
3538    // Else distributed filesystem.  Make a new instance per daemon.  Below
3539    // code is taken from the AppendTestUtil over in hdfs.
3540    String username = User.getCurrent().getName() +
3541      differentiatingSuffix;
3542    User user = User.createUserForTesting(c, username,
3543        new String[]{"supergroup"});
3544    return user;
3545  }
3546
3547  public static NavigableSet<String> getAllOnlineRegions(MiniHBaseCluster cluster)
3548      throws IOException {
3549    NavigableSet<String> online = new TreeSet<>();
3550    for (RegionServerThread rst : cluster.getLiveRegionServerThreads()) {
3551      try {
3552        for (RegionInfo region :
3553            ProtobufUtil.getOnlineRegions(rst.getRegionServer().getRSRpcServices())) {
3554          online.add(region.getRegionNameAsString());
3555        }
3556      } catch (RegionServerStoppedException e) {
3557        // That's fine.
3558      }
3559    }
3560    for (MasterThread mt : cluster.getLiveMasterThreads()) {
3561      try {
3562        for (RegionInfo region :
3563            ProtobufUtil.getOnlineRegions(mt.getMaster().getRSRpcServices())) {
3564          online.add(region.getRegionNameAsString());
3565        }
3566      } catch (RegionServerStoppedException e) {
3567        // That's fine.
3568      } catch (ServerNotRunningYetException e) {
3569        // That's fine.
3570      }
3571    }
3572    return online;
3573  }
3574
3575  /**
3576   * Set maxRecoveryErrorCount in DFSClient.  In 0.20 pre-append its hard-coded to 5 and
3577   * makes tests linger.  Here is the exception you'll see:
3578   * <pre>
3579   * 2010-06-15 11:52:28,511 WARN  [DataStreamer for file /hbase/.logs/wal.1276627923013 block
3580   * blk_928005470262850423_1021] hdfs.DFSClient$DFSOutputStream(2657): Error Recovery for block
3581   * blk_928005470262850423_1021 failed  because recovery from primary datanode 127.0.0.1:53683
3582   * failed 4 times.  Pipeline was 127.0.0.1:53687, 127.0.0.1:53683. Will retry...
3583   * </pre>
3584   * @param stream A DFSClient.DFSOutputStream.
3585   * @param max
3586   * @throws NoSuchFieldException
3587   * @throws SecurityException
3588   * @throws IllegalAccessException
3589   * @throws IllegalArgumentException
3590   */
3591  public static void setMaxRecoveryErrorCount(final OutputStream stream,
3592      final int max) {
3593    try {
3594      Class<?> [] clazzes = DFSClient.class.getDeclaredClasses();
3595      for (Class<?> clazz: clazzes) {
3596        String className = clazz.getSimpleName();
3597        if (className.equals("DFSOutputStream")) {
3598          if (clazz.isInstance(stream)) {
3599            Field maxRecoveryErrorCountField =
3600              stream.getClass().getDeclaredField("maxRecoveryErrorCount");
3601            maxRecoveryErrorCountField.setAccessible(true);
3602            maxRecoveryErrorCountField.setInt(stream, max);
3603            break;
3604          }
3605        }
3606      }
3607    } catch (Exception e) {
3608      LOG.info("Could not set max recovery field", e);
3609    }
3610  }
3611
3612  /**
3613   * Uses directly the assignment manager to assign the region. and waits until the specified region
3614   * has completed assignment.
3615   * @return true if the region is assigned false otherwise.
3616   */
3617  public boolean assignRegion(final RegionInfo regionInfo)
3618      throws IOException, InterruptedException {
3619    final AssignmentManager am = getHBaseCluster().getMaster().getAssignmentManager();
3620    am.assign(regionInfo);
3621    return AssignmentTestingUtil.waitForAssignment(am, regionInfo);
3622  }
3623
3624  /**
3625   * Move region to destination server and wait till region is completely moved and online
3626   *
3627   * @param destRegion region to move
3628   * @param destServer destination server of the region
3629   * @throws InterruptedException
3630   * @throws IOException
3631   */
3632  public void moveRegionAndWait(RegionInfo destRegion, ServerName destServer)
3633      throws InterruptedException, IOException {
3634    HMaster master = getMiniHBaseCluster().getMaster();
3635    // TODO: Here we start the move. The move can take a while.
3636    getAdmin().move(destRegion.getEncodedNameAsBytes(), destServer);
3637    while (true) {
3638      ServerName serverName = master.getAssignmentManager().getRegionStates()
3639          .getRegionServerOfRegion(destRegion);
3640      if (serverName != null && serverName.equals(destServer)) {
3641        assertRegionOnServer(destRegion, serverName, 2000);
3642        break;
3643      }
3644      Thread.sleep(10);
3645    }
3646  }
3647
3648  /**
3649   * Wait until all regions for a table in hbase:meta have a non-empty
3650   * info:server, up to a configuable timeout value (default is 60 seconds)
3651   * This means all regions have been deployed,
3652   * master has been informed and updated hbase:meta with the regions deployed
3653   * server.
3654   * @param tableName the table name
3655   * @throws IOException
3656   */
3657  public void waitUntilAllRegionsAssigned(final TableName tableName) throws IOException {
3658    waitUntilAllRegionsAssigned(tableName,
3659      this.conf.getLong("hbase.client.sync.wait.timeout.msec", 60000));
3660  }
3661
3662  /**
3663   * Waith until all system table's regions get assigned
3664   * @throws IOException
3665   */
3666  public void waitUntilAllSystemRegionsAssigned() throws IOException {
3667    waitUntilAllRegionsAssigned(TableName.META_TABLE_NAME);
3668    waitUntilAllRegionsAssigned(TableName.NAMESPACE_TABLE_NAME);
3669  }
3670
3671  /**
3672   * Wait until all regions for a table in hbase:meta have a non-empty
3673   * info:server, or until timeout.  This means all regions have been deployed,
3674   * master has been informed and updated hbase:meta with the regions deployed
3675   * server.
3676   * @param tableName the table name
3677   * @param timeout timeout, in milliseconds
3678   * @throws IOException
3679   */
3680  public void waitUntilAllRegionsAssigned(final TableName tableName, final long timeout)
3681      throws IOException {
3682    if (!TableName.isMetaTableName(tableName)) {
3683      try (final Table meta = getConnection().getTable(TableName.META_TABLE_NAME)) {
3684        LOG.debug("Waiting until all regions of table " + tableName + " get assigned. Timeout = " +
3685            timeout + "ms");
3686        waitFor(timeout, 200, true, new ExplainingPredicate<IOException>() {
3687          @Override
3688          public String explainFailure() throws IOException {
3689            return explainTableAvailability(tableName);
3690          }
3691
3692          @Override
3693          public boolean evaluate() throws IOException {
3694            Scan scan = new Scan();
3695            scan.addFamily(HConstants.CATALOG_FAMILY);
3696            boolean tableFound = false;
3697            try (ResultScanner s = meta.getScanner(scan)) {
3698              for (Result r; (r = s.next()) != null;) {
3699                byte[] b = r.getValue(HConstants.CATALOG_FAMILY, HConstants.REGIONINFO_QUALIFIER);
3700                HRegionInfo info = HRegionInfo.parseFromOrNull(b);
3701                if (info != null && info.getTable().equals(tableName)) {
3702                  // Get server hosting this region from catalog family. Return false if no server
3703                  // hosting this region, or if the server hosting this region was recently killed
3704                  // (for fault tolerance testing).
3705                  tableFound = true;
3706                  byte[] server =
3707                      r.getValue(HConstants.CATALOG_FAMILY, HConstants.SERVER_QUALIFIER);
3708                  if (server == null) {
3709                    return false;
3710                  } else {
3711                    byte[] startCode =
3712                        r.getValue(HConstants.CATALOG_FAMILY, HConstants.STARTCODE_QUALIFIER);
3713                    ServerName serverName =
3714                        ServerName.valueOf(Bytes.toString(server).replaceFirst(":", ",") + "," +
3715                            Bytes.toLong(startCode));
3716                    if (!getHBaseClusterInterface().isDistributedCluster() &&
3717                        getHBaseCluster().isKilledRS(serverName)) {
3718                      return false;
3719                    }
3720                  }
3721                  if (RegionStateStore.getRegionState(r, info) != RegionState.State.OPEN) {
3722                    return false;
3723                  }
3724                }
3725              }
3726            }
3727            if (!tableFound) {
3728              LOG.warn("Didn't find the entries for table " + tableName + " in meta, already deleted?");
3729            }
3730            return tableFound;
3731          }
3732        });
3733      }
3734    }
3735    LOG.info("All regions for table " + tableName + " assigned to meta. Checking AM states.");
3736    // check from the master state if we are using a mini cluster
3737    if (!getHBaseClusterInterface().isDistributedCluster()) {
3738      // So, all regions are in the meta table but make sure master knows of the assignments before
3739      // returning -- sometimes this can lag.
3740      HMaster master = getHBaseCluster().getMaster();
3741      final RegionStates states = master.getAssignmentManager().getRegionStates();
3742      waitFor(timeout, 200, new ExplainingPredicate<IOException>() {
3743        @Override
3744        public String explainFailure() throws IOException {
3745          return explainTableAvailability(tableName);
3746        }
3747
3748        @Override
3749        public boolean evaluate() throws IOException {
3750          List<RegionInfo> hris = states.getRegionsOfTable(tableName);
3751          return hris != null && !hris.isEmpty();
3752        }
3753      });
3754    }
3755    LOG.info("All regions for table " + tableName + " assigned.");
3756  }
3757
3758  /**
3759   * Do a small get/scan against one store. This is required because store
3760   * has no actual methods of querying itself, and relies on StoreScanner.
3761   */
3762  public static List<Cell> getFromStoreFile(HStore store,
3763                                                Get get) throws IOException {
3764    Scan scan = new Scan(get);
3765    InternalScanner scanner = (InternalScanner) store.getScanner(scan,
3766        scan.getFamilyMap().get(store.getColumnFamilyDescriptor().getName()),
3767        // originally MultiVersionConcurrencyControl.resetThreadReadPoint() was called to set
3768        // readpoint 0.
3769        0);
3770
3771    List<Cell> result = new ArrayList<>();
3772    scanner.next(result);
3773    if (!result.isEmpty()) {
3774      // verify that we are on the row we want:
3775      Cell kv = result.get(0);
3776      if (!CellUtil.matchingRows(kv, get.getRow())) {
3777        result.clear();
3778      }
3779    }
3780    scanner.close();
3781    return result;
3782  }
3783
3784  /**
3785   * Create region split keys between startkey and endKey
3786   *
3787   * @param startKey
3788   * @param endKey
3789   * @param numRegions the number of regions to be created. it has to be greater than 3.
3790   * @return resulting split keys
3791   */
3792  public byte[][] getRegionSplitStartKeys(byte[] startKey, byte[] endKey, int numRegions){
3793    assertTrue(numRegions>3);
3794    byte [][] tmpSplitKeys = Bytes.split(startKey, endKey, numRegions - 3);
3795    byte [][] result = new byte[tmpSplitKeys.length+1][];
3796    System.arraycopy(tmpSplitKeys, 0, result, 1, tmpSplitKeys.length);
3797    result[0] = HConstants.EMPTY_BYTE_ARRAY;
3798    return result;
3799  }
3800
3801  /**
3802   * Do a small get/scan against one store. This is required because store
3803   * has no actual methods of querying itself, and relies on StoreScanner.
3804   */
3805  public static List<Cell> getFromStoreFile(HStore store,
3806                                                byte [] row,
3807                                                NavigableSet<byte[]> columns
3808                                                ) throws IOException {
3809    Get get = new Get(row);
3810    Map<byte[], NavigableSet<byte[]>> s = get.getFamilyMap();
3811    s.put(store.getColumnFamilyDescriptor().getName(), columns);
3812
3813    return getFromStoreFile(store,get);
3814  }
3815
3816  public static void assertKVListsEqual(String additionalMsg,
3817      final List<? extends Cell> expected,
3818      final List<? extends Cell> actual) {
3819    final int eLen = expected.size();
3820    final int aLen = actual.size();
3821    final int minLen = Math.min(eLen, aLen);
3822
3823    int i;
3824    for (i = 0; i < minLen
3825        && CellComparator.getInstance().compare(expected.get(i), actual.get(i)) == 0;
3826        ++i) {}
3827
3828    if (additionalMsg == null) {
3829      additionalMsg = "";
3830    }
3831    if (!additionalMsg.isEmpty()) {
3832      additionalMsg = ". " + additionalMsg;
3833    }
3834
3835    if (eLen != aLen || i != minLen) {
3836      throw new AssertionError(
3837          "Expected and actual KV arrays differ at position " + i + ": " +
3838          safeGetAsStr(expected, i) + " (length " + eLen +") vs. " +
3839          safeGetAsStr(actual, i) + " (length " + aLen + ")" + additionalMsg);
3840    }
3841  }
3842
3843  public static <T> String safeGetAsStr(List<T> lst, int i) {
3844    if (0 <= i && i < lst.size()) {
3845      return lst.get(i).toString();
3846    } else {
3847      return "<out_of_range>";
3848    }
3849  }
3850
3851  public String getClusterKey() {
3852    return conf.get(HConstants.ZOOKEEPER_QUORUM) + ":"
3853        + conf.get(HConstants.ZOOKEEPER_CLIENT_PORT) + ":"
3854        + conf.get(HConstants.ZOOKEEPER_ZNODE_PARENT,
3855            HConstants.DEFAULT_ZOOKEEPER_ZNODE_PARENT);
3856  }
3857
3858  /** Creates a random table with the given parameters */
3859  public Table createRandomTable(TableName tableName,
3860      final Collection<String> families,
3861      final int maxVersions,
3862      final int numColsPerRow,
3863      final int numFlushes,
3864      final int numRegions,
3865      final int numRowsPerFlush)
3866      throws IOException, InterruptedException {
3867
3868    LOG.info("\n\nCreating random table " + tableName + " with " + numRegions +
3869        " regions, " + numFlushes + " storefiles per region, " +
3870        numRowsPerFlush + " rows per flush, maxVersions=" +  maxVersions +
3871        "\n");
3872
3873    final Random rand = new Random(tableName.hashCode() * 17L + 12938197137L);
3874    final int numCF = families.size();
3875    final byte[][] cfBytes = new byte[numCF][];
3876    {
3877      int cfIndex = 0;
3878      for (String cf : families) {
3879        cfBytes[cfIndex++] = Bytes.toBytes(cf);
3880      }
3881    }
3882
3883    final int actualStartKey = 0;
3884    final int actualEndKey = Integer.MAX_VALUE;
3885    final int keysPerRegion = (actualEndKey - actualStartKey) / numRegions;
3886    final int splitStartKey = actualStartKey + keysPerRegion;
3887    final int splitEndKey = actualEndKey - keysPerRegion;
3888    final String keyFormat = "%08x";
3889    final Table table = createTable(tableName, cfBytes,
3890        maxVersions,
3891        Bytes.toBytes(String.format(keyFormat, splitStartKey)),
3892        Bytes.toBytes(String.format(keyFormat, splitEndKey)),
3893        numRegions);
3894
3895    if (hbaseCluster != null) {
3896      getMiniHBaseCluster().flushcache(TableName.META_TABLE_NAME);
3897    }
3898
3899    BufferedMutator mutator = getConnection().getBufferedMutator(tableName);
3900
3901    for (int iFlush = 0; iFlush < numFlushes; ++iFlush) {
3902      for (int iRow = 0; iRow < numRowsPerFlush; ++iRow) {
3903        final byte[] row = Bytes.toBytes(String.format(keyFormat,
3904            actualStartKey + rand.nextInt(actualEndKey - actualStartKey)));
3905
3906        Put put = new Put(row);
3907        Delete del = new Delete(row);
3908        for (int iCol = 0; iCol < numColsPerRow; ++iCol) {
3909          final byte[] cf = cfBytes[rand.nextInt(numCF)];
3910          final long ts = rand.nextInt();
3911          final byte[] qual = Bytes.toBytes("col" + iCol);
3912          if (rand.nextBoolean()) {
3913            final byte[] value = Bytes.toBytes("value_for_row_" + iRow +
3914                "_cf_" + Bytes.toStringBinary(cf) + "_col_" + iCol + "_ts_" +
3915                ts + "_random_" + rand.nextLong());
3916            put.addColumn(cf, qual, ts, value);
3917          } else if (rand.nextDouble() < 0.8) {
3918            del.addColumn(cf, qual, ts);
3919          } else {
3920            del.addColumns(cf, qual, ts);
3921          }
3922        }
3923
3924        if (!put.isEmpty()) {
3925          mutator.mutate(put);
3926        }
3927
3928        if (!del.isEmpty()) {
3929          mutator.mutate(del);
3930        }
3931      }
3932      LOG.info("Initiating flush #" + iFlush + " for table " + tableName);
3933      mutator.flush();
3934      if (hbaseCluster != null) {
3935        getMiniHBaseCluster().flushcache(table.getName());
3936      }
3937    }
3938    mutator.close();
3939
3940    return table;
3941  }
3942
3943  public static int randomFreePort() {
3944    return HBaseCommonTestingUtility.randomFreePort();
3945  }
3946  public static String randomMultiCastAddress() {
3947    return "226.1.1." + random.nextInt(254);
3948  }
3949
3950  public static void waitForHostPort(String host, int port)
3951      throws IOException {
3952    final int maxTimeMs = 10000;
3953    final int maxNumAttempts = maxTimeMs / HConstants.SOCKET_RETRY_WAIT_MS;
3954    IOException savedException = null;
3955    LOG.info("Waiting for server at " + host + ":" + port);
3956    for (int attempt = 0; attempt < maxNumAttempts; ++attempt) {
3957      try {
3958        Socket sock = new Socket(InetAddress.getByName(host), port);
3959        sock.close();
3960        savedException = null;
3961        LOG.info("Server at " + host + ":" + port + " is available");
3962        break;
3963      } catch (UnknownHostException e) {
3964        throw new IOException("Failed to look up " + host, e);
3965      } catch (IOException e) {
3966        savedException = e;
3967      }
3968      Threads.sleepWithoutInterrupt(HConstants.SOCKET_RETRY_WAIT_MS);
3969    }
3970
3971    if (savedException != null) {
3972      throw savedException;
3973    }
3974  }
3975
3976  /**
3977   * Creates a pre-split table for load testing. If the table already exists,
3978   * logs a warning and continues.
3979   * @return the number of regions the table was split into
3980   */
3981  public static int createPreSplitLoadTestTable(Configuration conf,
3982      TableName tableName, byte[] columnFamily, Algorithm compression,
3983      DataBlockEncoding dataBlockEncoding) throws IOException {
3984    return createPreSplitLoadTestTable(conf, tableName,
3985      columnFamily, compression, dataBlockEncoding, DEFAULT_REGIONS_PER_SERVER, 1,
3986      Durability.USE_DEFAULT);
3987  }
3988  /**
3989   * Creates a pre-split table for load testing. If the table already exists,
3990   * logs a warning and continues.
3991   * @return the number of regions the table was split into
3992   */
3993  public static int createPreSplitLoadTestTable(Configuration conf,
3994      TableName tableName, byte[] columnFamily, Algorithm compression,
3995      DataBlockEncoding dataBlockEncoding, int numRegionsPerServer, int regionReplication,
3996      Durability durability)
3997          throws IOException {
3998    HTableDescriptor desc = new HTableDescriptor(tableName);
3999    desc.setDurability(durability);
4000    desc.setRegionReplication(regionReplication);
4001    HColumnDescriptor hcd = new HColumnDescriptor(columnFamily);
4002    hcd.setDataBlockEncoding(dataBlockEncoding);
4003    hcd.setCompressionType(compression);
4004    return createPreSplitLoadTestTable(conf, desc, hcd, numRegionsPerServer);
4005  }
4006
4007  /**
4008   * Creates a pre-split table for load testing. If the table already exists,
4009   * logs a warning and continues.
4010   * @return the number of regions the table was split into
4011   */
4012  public static int createPreSplitLoadTestTable(Configuration conf,
4013      TableName tableName, byte[][] columnFamilies, Algorithm compression,
4014      DataBlockEncoding dataBlockEncoding, int numRegionsPerServer, int regionReplication,
4015      Durability durability)
4016          throws IOException {
4017    HTableDescriptor desc = new HTableDescriptor(tableName);
4018    desc.setDurability(durability);
4019    desc.setRegionReplication(regionReplication);
4020    HColumnDescriptor[] hcds = new HColumnDescriptor[columnFamilies.length];
4021    for (int i = 0; i < columnFamilies.length; i++) {
4022      HColumnDescriptor hcd = new HColumnDescriptor(columnFamilies[i]);
4023      hcd.setDataBlockEncoding(dataBlockEncoding);
4024      hcd.setCompressionType(compression);
4025      hcds[i] = hcd;
4026    }
4027    return createPreSplitLoadTestTable(conf, desc, hcds, numRegionsPerServer);
4028  }
4029
4030  /**
4031   * Creates a pre-split table for load testing. If the table already exists,
4032   * logs a warning and continues.
4033   * @return the number of regions the table was split into
4034   */
4035  public static int createPreSplitLoadTestTable(Configuration conf,
4036      TableDescriptor desc, ColumnFamilyDescriptor hcd) throws IOException {
4037    return createPreSplitLoadTestTable(conf, desc, hcd, DEFAULT_REGIONS_PER_SERVER);
4038  }
4039
4040  /**
4041   * Creates a pre-split table for load testing. If the table already exists,
4042   * logs a warning and continues.
4043   * @return the number of regions the table was split into
4044   */
4045  public static int createPreSplitLoadTestTable(Configuration conf,
4046      TableDescriptor desc, ColumnFamilyDescriptor hcd, int numRegionsPerServer) throws IOException {
4047    return createPreSplitLoadTestTable(conf, desc, new ColumnFamilyDescriptor[] {hcd},
4048        numRegionsPerServer);
4049  }
4050
4051  /**
4052   * Creates a pre-split table for load testing. If the table already exists,
4053   * logs a warning and continues.
4054   * @return the number of regions the table was split into
4055   */
4056  public static int createPreSplitLoadTestTable(Configuration conf,
4057      TableDescriptor desc, ColumnFamilyDescriptor[] hcds,
4058      int numRegionsPerServer) throws IOException {
4059    return createPreSplitLoadTestTable(conf, desc, hcds,
4060      new RegionSplitter.HexStringSplit(), numRegionsPerServer);
4061  }
4062
4063  /**
4064   * Creates a pre-split table for load testing. If the table already exists,
4065   * logs a warning and continues.
4066   * @return the number of regions the table was split into
4067   */
4068  public static int createPreSplitLoadTestTable(Configuration conf,
4069      TableDescriptor td, ColumnFamilyDescriptor[] cds,
4070      SplitAlgorithm splitter, int numRegionsPerServer) throws IOException {
4071    TableDescriptorBuilder builder = TableDescriptorBuilder.newBuilder(td);
4072    for (ColumnFamilyDescriptor cd : cds) {
4073      if (!td.hasColumnFamily(cd.getName())) {
4074        builder.setColumnFamily(cd);
4075      }
4076    }
4077    td = builder.build();
4078    int totalNumberOfRegions = 0;
4079    Connection unmanagedConnection = ConnectionFactory.createConnection(conf);
4080    Admin admin = unmanagedConnection.getAdmin();
4081
4082    try {
4083      // create a table a pre-splits regions.
4084      // The number of splits is set as:
4085      //    region servers * regions per region server).
4086      int numberOfServers = admin.getRegionServers().size();
4087      if (numberOfServers == 0) {
4088        throw new IllegalStateException("No live regionservers");
4089      }
4090
4091      totalNumberOfRegions = numberOfServers * numRegionsPerServer;
4092      LOG.info("Number of live regionservers: " + numberOfServers + ", " +
4093          "pre-splitting table into " + totalNumberOfRegions + " regions " +
4094          "(regions per server: " + numRegionsPerServer + ")");
4095
4096      byte[][] splits = splitter.split(
4097          totalNumberOfRegions);
4098
4099      admin.createTable(td, splits);
4100    } catch (MasterNotRunningException e) {
4101      LOG.error("Master not running", e);
4102      throw new IOException(e);
4103    } catch (TableExistsException e) {
4104      LOG.warn("Table " + td.getTableName() +
4105          " already exists, continuing");
4106    } finally {
4107      admin.close();
4108      unmanagedConnection.close();
4109    }
4110    return totalNumberOfRegions;
4111  }
4112
4113  public static int getMetaRSPort(Connection connection) throws IOException {
4114    try (RegionLocator locator = connection.getRegionLocator(TableName.META_TABLE_NAME)) {
4115      return locator.getRegionLocation(Bytes.toBytes("")).getPort();
4116    }
4117  }
4118
4119  /**
4120   *  Due to async racing issue, a region may not be in
4121   *  the online region list of a region server yet, after
4122   *  the assignment znode is deleted and the new assignment
4123   *  is recorded in master.
4124   */
4125  public void assertRegionOnServer(
4126      final RegionInfo hri, final ServerName server,
4127      final long timeout) throws IOException, InterruptedException {
4128    long timeoutTime = System.currentTimeMillis() + timeout;
4129    while (true) {
4130      List<RegionInfo> regions = getAdmin().getRegions(server);
4131      if (regions.stream().anyMatch(r -> RegionInfo.COMPARATOR.compare(r, hri) == 0)) return;
4132      long now = System.currentTimeMillis();
4133      if (now > timeoutTime) break;
4134      Thread.sleep(10);
4135    }
4136    fail("Could not find region " + hri.getRegionNameAsString()
4137      + " on server " + server);
4138  }
4139
4140  /**
4141   * Check to make sure the region is open on the specified
4142   * region server, but not on any other one.
4143   */
4144  public void assertRegionOnlyOnServer(
4145      final RegionInfo hri, final ServerName server,
4146      final long timeout) throws IOException, InterruptedException {
4147    long timeoutTime = System.currentTimeMillis() + timeout;
4148    while (true) {
4149      List<RegionInfo> regions = getAdmin().getRegions(server);
4150      if (regions.stream().anyMatch(r -> RegionInfo.COMPARATOR.compare(r, hri) == 0)) {
4151        List<JVMClusterUtil.RegionServerThread> rsThreads =
4152          getHBaseCluster().getLiveRegionServerThreads();
4153        for (JVMClusterUtil.RegionServerThread rsThread: rsThreads) {
4154          HRegionServer rs = rsThread.getRegionServer();
4155          if (server.equals(rs.getServerName())) {
4156            continue;
4157          }
4158          Collection<HRegion> hrs = rs.getOnlineRegionsLocalContext();
4159          for (HRegion r: hrs) {
4160            assertTrue("Region should not be double assigned",
4161              r.getRegionInfo().getRegionId() != hri.getRegionId());
4162          }
4163        }
4164        return; // good, we are happy
4165      }
4166      long now = System.currentTimeMillis();
4167      if (now > timeoutTime) break;
4168      Thread.sleep(10);
4169    }
4170    fail("Could not find region " + hri.getRegionNameAsString()
4171      + " on server " + server);
4172  }
4173
4174  public HRegion createTestRegion(String tableName, ColumnFamilyDescriptor cd) throws IOException {
4175    TableDescriptor td =
4176        TableDescriptorBuilder.newBuilder(TableName.valueOf(tableName)).setColumnFamily(cd).build();
4177    RegionInfo info = RegionInfoBuilder.newBuilder(TableName.valueOf(tableName)).build();
4178    return createRegionAndWAL(info, getDataTestDir(), getConfiguration(), td);
4179  }
4180
4181  public HRegion createTestRegion(String tableName, ColumnFamilyDescriptor cd,
4182      BlockCache blockCache) throws IOException {
4183    TableDescriptor td =
4184        TableDescriptorBuilder.newBuilder(TableName.valueOf(tableName)).setColumnFamily(cd).build();
4185    RegionInfo info = RegionInfoBuilder.newBuilder(TableName.valueOf(tableName)).build();
4186    return createRegionAndWAL(info, getDataTestDir(), getConfiguration(), td, blockCache);
4187  }
4188
4189  public void setFileSystemURI(String fsURI) {
4190    FS_URI = fsURI;
4191  }
4192
4193  /**
4194   * Returns a {@link Predicate} for checking that there are no regions in transition in master
4195   */
4196  public ExplainingPredicate<IOException> predicateNoRegionsInTransition() {
4197    return new ExplainingPredicate<IOException>() {
4198      @Override
4199      public String explainFailure() throws IOException {
4200        final RegionStates regionStates = getMiniHBaseCluster().getMaster()
4201            .getAssignmentManager().getRegionStates();
4202        return "found in transition: " + regionStates.getRegionsInTransition().toString();
4203      }
4204
4205      @Override
4206      public boolean evaluate() throws IOException {
4207        HMaster master = getMiniHBaseCluster().getMaster();
4208        if (master == null) return false;
4209        AssignmentManager am = master.getAssignmentManager();
4210        if (am == null) return false;
4211        return !am.hasRegionsInTransition();
4212      }
4213    };
4214  }
4215
4216  /**
4217   * Returns a {@link Predicate} for checking that table is enabled
4218   */
4219  public Waiter.Predicate<IOException> predicateTableEnabled(final TableName tableName) {
4220    return new ExplainingPredicate<IOException>() {
4221      @Override
4222      public String explainFailure() throws IOException {
4223        return explainTableState(tableName, TableState.State.ENABLED);
4224      }
4225
4226      @Override
4227      public boolean evaluate() throws IOException {
4228        return getAdmin().tableExists(tableName) && getAdmin().isTableEnabled(tableName);
4229      }
4230    };
4231  }
4232
4233  /**
4234   * Returns a {@link Predicate} for checking that table is enabled
4235   */
4236  public Waiter.Predicate<IOException> predicateTableDisabled(final TableName tableName) {
4237    return new ExplainingPredicate<IOException>() {
4238      @Override
4239      public String explainFailure() throws IOException {
4240        return explainTableState(tableName, TableState.State.DISABLED);
4241      }
4242
4243      @Override
4244      public boolean evaluate() throws IOException {
4245        return getAdmin().isTableDisabled(tableName);
4246      }
4247    };
4248  }
4249
4250  /**
4251   * Returns a {@link Predicate} for checking that table is enabled
4252   */
4253  public Waiter.Predicate<IOException> predicateTableAvailable(final TableName tableName) {
4254    return new ExplainingPredicate<IOException>() {
4255      @Override
4256      public String explainFailure() throws IOException {
4257        return explainTableAvailability(tableName);
4258      }
4259
4260      @Override
4261      public boolean evaluate() throws IOException {
4262        boolean tableAvailable = getAdmin().isTableAvailable(tableName);
4263        if (tableAvailable) {
4264          try (Table table = getConnection().getTable(tableName)) {
4265            TableDescriptor htd = table.getDescriptor();
4266            for (HRegionLocation loc : getConnection().getRegionLocator(tableName)
4267                .getAllRegionLocations()) {
4268              Scan scan = new Scan().withStartRow(loc.getRegionInfo().getStartKey())
4269                  .withStopRow(loc.getRegionInfo().getEndKey()).setOneRowLimit()
4270                  .setMaxResultsPerColumnFamily(1).setCacheBlocks(false);
4271              for (byte[] family : htd.getColumnFamilyNames()) {
4272                scan.addFamily(family);
4273              }
4274              try (ResultScanner scanner = table.getScanner(scan)) {
4275                scanner.next();
4276              }
4277            }
4278          }
4279        }
4280        return tableAvailable;
4281      }
4282    };
4283  }
4284
4285  /**
4286   * Wait until no regions in transition.
4287   * @param timeout How long to wait.
4288   * @throws IOException
4289   */
4290  public void waitUntilNoRegionsInTransition(final long timeout) throws IOException {
4291    waitFor(timeout, predicateNoRegionsInTransition());
4292  }
4293
4294  /**
4295   * Wait until no regions in transition. (time limit 15min)
4296   * @throws IOException
4297   */
4298  public void waitUntilNoRegionsInTransition() throws IOException {
4299    waitUntilNoRegionsInTransition(15 * 60000);
4300  }
4301
4302  /**
4303   * Wait until labels is ready in VisibilityLabelsCache.
4304   * @param timeoutMillis
4305   * @param labels
4306   */
4307  public void waitLabelAvailable(long timeoutMillis, final String... labels) {
4308    final VisibilityLabelsCache labelsCache = VisibilityLabelsCache.get();
4309    waitFor(timeoutMillis, new Waiter.ExplainingPredicate<RuntimeException>() {
4310
4311      @Override
4312      public boolean evaluate() {
4313        for (String label : labels) {
4314          if (labelsCache.getLabelOrdinal(label) == 0) {
4315            return false;
4316          }
4317        }
4318        return true;
4319      }
4320
4321      @Override
4322      public String explainFailure() {
4323        for (String label : labels) {
4324          if (labelsCache.getLabelOrdinal(label) == 0) {
4325            return label + " is not available yet";
4326          }
4327        }
4328        return "";
4329      }
4330    });
4331  }
4332
4333  /**
4334   * Create a set of column descriptors with the combination of compression,
4335   * encoding, bloom codecs available.
4336   * @return the list of column descriptors
4337   */
4338  public static List<HColumnDescriptor> generateColumnDescriptors() {
4339    return generateColumnDescriptors("");
4340  }
4341
4342  /**
4343   * Create a set of column descriptors with the combination of compression,
4344   * encoding, bloom codecs available.
4345   * @param prefix family names prefix
4346   * @return the list of column descriptors
4347   */
4348  public static List<HColumnDescriptor> generateColumnDescriptors(final String prefix) {
4349    List<HColumnDescriptor> htds = new ArrayList<>();
4350    long familyId = 0;
4351    for (Compression.Algorithm compressionType: getSupportedCompressionAlgorithms()) {
4352      for (DataBlockEncoding encodingType: DataBlockEncoding.values()) {
4353        for (BloomType bloomType: BloomType.values()) {
4354          String name = String.format("%s-cf-!@#&-%d!@#", prefix, familyId);
4355          HColumnDescriptor htd = new HColumnDescriptor(name);
4356          htd.setCompressionType(compressionType);
4357          htd.setDataBlockEncoding(encodingType);
4358          htd.setBloomFilterType(bloomType);
4359          htds.add(htd);
4360          familyId++;
4361        }
4362      }
4363    }
4364    return htds;
4365  }
4366
4367  /**
4368   * Get supported compression algorithms.
4369   * @return supported compression algorithms.
4370   */
4371  public static Compression.Algorithm[] getSupportedCompressionAlgorithms() {
4372    String[] allAlgos = HFile.getSupportedCompressionAlgorithms();
4373    List<Compression.Algorithm> supportedAlgos = new ArrayList<>();
4374    for (String algoName : allAlgos) {
4375      try {
4376        Compression.Algorithm algo = Compression.getCompressionAlgorithmByName(algoName);
4377        algo.getCompressor();
4378        supportedAlgos.add(algo);
4379      } catch (Throwable t) {
4380        // this algo is not available
4381      }
4382    }
4383    return supportedAlgos.toArray(new Algorithm[supportedAlgos.size()]);
4384  }
4385
4386  public Result getClosestRowBefore(Region r, byte[] row, byte[] family) throws IOException {
4387    Scan scan = new Scan(row);
4388    scan.setSmall(true);
4389    scan.setCaching(1);
4390    scan.setReversed(true);
4391    scan.addFamily(family);
4392    try (RegionScanner scanner = r.getScanner(scan)) {
4393      List<Cell> cells = new ArrayList<>(1);
4394      scanner.next(cells);
4395      if (r.getRegionInfo().isMetaRegion() && !isTargetTable(row, cells.get(0))) {
4396        return null;
4397      }
4398      return Result.create(cells);
4399    }
4400  }
4401
4402  private boolean isTargetTable(final byte[] inRow, Cell c) {
4403    String inputRowString = Bytes.toString(inRow);
4404    int i = inputRowString.indexOf(HConstants.DELIMITER);
4405    String outputRowString = Bytes.toString(c.getRowArray(), c.getRowOffset(), c.getRowLength());
4406    int o = outputRowString.indexOf(HConstants.DELIMITER);
4407    return inputRowString.substring(0, i).equals(outputRowString.substring(0, o));
4408  }
4409
4410  /**
4411   * Sets up {@link MiniKdc} for testing security.
4412   * Uses {@link HBaseKerberosUtils} to set the given keytab file as
4413   * {@link HBaseKerberosUtils#KRB_KEYTAB_FILE}.
4414   * FYI, there is also the easier-to-use kerby KDC server and utility for using it,
4415   * {@link org.apache.hadoop.hbase.util.SimpleKdcServerUtil}. The kerby KDC server is preferred;
4416   * less baggage. It came in in HBASE-5291.
4417   */
4418  public MiniKdc setupMiniKdc(File keytabFile) throws Exception {
4419    Properties conf = MiniKdc.createConf();
4420    conf.put(MiniKdc.DEBUG, true);
4421    MiniKdc kdc = null;
4422    File dir = null;
4423    // There is time lag between selecting a port and trying to bind with it. It's possible that
4424    // another service captures the port in between which'll result in BindException.
4425    boolean bindException;
4426    int numTries = 0;
4427    do {
4428      try {
4429        bindException = false;
4430        dir = new File(getDataTestDir("kdc").toUri().getPath());
4431        kdc = new MiniKdc(conf, dir);
4432        kdc.start();
4433      } catch (BindException e) {
4434        FileUtils.deleteDirectory(dir);  // clean directory
4435        numTries++;
4436        if (numTries == 3) {
4437          LOG.error("Failed setting up MiniKDC. Tried " + numTries + " times.");
4438          throw e;
4439        }
4440        LOG.error("BindException encountered when setting up MiniKdc. Trying again.");
4441        bindException = true;
4442      }
4443    } while (bindException);
4444    HBaseKerberosUtils.setKeytabFileForTesting(keytabFile.getAbsolutePath());
4445    return kdc;
4446  }
4447
4448  public int getNumHFiles(final TableName tableName, final byte[] family) {
4449    int numHFiles = 0;
4450    for (RegionServerThread regionServerThread : getMiniHBaseCluster().getRegionServerThreads()) {
4451      numHFiles+= getNumHFilesForRS(regionServerThread.getRegionServer(), tableName,
4452                                    family);
4453    }
4454    return numHFiles;
4455  }
4456
4457  public int getNumHFilesForRS(final HRegionServer rs, final TableName tableName,
4458                               final byte[] family) {
4459    int numHFiles = 0;
4460    for (Region region : rs.getRegions(tableName)) {
4461      numHFiles += region.getStore(family).getStorefilesCount();
4462    }
4463    return numHFiles;
4464  }
4465
4466  public void verifyTableDescriptorIgnoreTableName(TableDescriptor ltd, TableDescriptor rtd) {
4467    assertEquals(ltd.getValues().hashCode(), rtd.getValues().hashCode());
4468    Collection<ColumnFamilyDescriptor> ltdFamilies = Arrays.asList(ltd.getColumnFamilies());
4469    Collection<ColumnFamilyDescriptor> rtdFamilies = Arrays.asList(rtd.getColumnFamilies());
4470    assertEquals(ltdFamilies.size(), rtdFamilies.size());
4471    for (Iterator<ColumnFamilyDescriptor> it = ltdFamilies.iterator(), it2 =
4472         rtdFamilies.iterator(); it.hasNext();) {
4473      assertEquals(0,
4474          ColumnFamilyDescriptor.COMPARATOR.compare(it.next(), it2.next()));
4475    }
4476  }
4477
4478  /**
4479   * Await the successful return of {@code condition}, sleeping {@code sleepMillis} between
4480   * invocations.
4481   */
4482  public static void await(final long sleepMillis, final BooleanSupplier condition)
4483    throws InterruptedException {
4484    try {
4485      while (!condition.getAsBoolean()) {
4486        Thread.sleep(sleepMillis);
4487      }
4488    } catch (RuntimeException e) {
4489      if (e.getCause() instanceof AssertionError) {
4490        throw (AssertionError) e.getCause();
4491      }
4492      throw e;
4493    }
4494  }
4495}