/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.snapshot;

import static org.junit.Assert.assertFalse;
import java.util.Iterator;
import java.util.Map;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseClassTestRule;
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.testclassification.LargeTests;
import org.apache.hadoop.hbase.testclassification.VerySlowMapReduceTests;
import org.apache.hadoop.hbase.util.Bytes;
import org.junit.After;
import org.junit.AfterClass;
import org.junit.Before;
import org.junit.BeforeClass;
import org.junit.ClassRule;
import org.junit.Ignore;
import org.junit.Rule;
import org.junit.Test;
import org.junit.experimental.categories.Category;
import org.junit.rules.TestName;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

/**
 * Tests that are adjunct to {@link TestExportSnapshot}. They used to be in the same test suite,
 * but that suite ran too close to the maximum time limit, so these tests were split out. Uses
 * facilities from TestExportSnapshot where possible.
 * @see TestExportSnapshot
 */
@Ignore // HBASE-24493
@Category({VerySlowMapReduceTests.class, LargeTests.class})
public class TestExportSnapshotAdjunct {
  private static final Logger LOG = LoggerFactory.getLogger(TestExportSnapshotAdjunct.class);

  @ClassRule
  public static final HBaseClassTestRule CLASS_RULE =
      HBaseClassTestRule.forClass(TestExportSnapshotAdjunct.class);
  @Rule
  public final TestName testName = new TestName();

  protected final static HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();

  protected TableName tableName;
  private String emptySnapshotName;
  private String snapshotName;
  private int tableNumFiles;
  private Admin admin;

  @BeforeClass
  public static void setUpBeforeClass() throws Exception {
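    // Reuse the base export-snapshot configuration, then start a three-node mini HBase cluster
    // and a mini MapReduce cluster so the export jobs have somewhere to run.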
    TestExportSnapshot.setUpBaseConf(TEST_UTIL.getConfiguration());
    TEST_UTIL.startMiniCluster(3);
    TEST_UTIL.startMiniMapReduceCluster();
  }

  /**
   * Check for references to '/tmp'. We are trying to avoid references to locations outside the
   * test data dir when running tests; such references make it possible for concurrent tests to
   * stamp on each other by mistake. This check looks for references to 'tmp'.
   *
   * This is a strange place for this check, but we want somewhere the configuration is
   * complete -- filled with hdfs and mapreduce configurations.
   */
  private void checkForReferencesToTmpDir() {
    Configuration conf = TEST_UTIL.getConfiguration();
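    // Walk every configuration entry; the keys and values skipped below are known, legitimate
    // tmp references. Everything else must not point at a 'tmp' location.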
    for (Iterator<Map.Entry<String, String>> i = conf.iterator(); i.hasNext();) {
      Map.Entry<String, String> e = i.next();
      if (e.getKey().contains("original.hbase.dir")) {
        continue;
      }
      if (e.getValue().contains("java.io.tmpdir")) {
        continue;
      }
      if (e.getValue().contains("hadoop.tmp.dir")) {
        continue;
      }
      if (e.getValue().contains("hbase.tmp.dir")) {
        continue;
      }
      assertFalse(e.getKey() + " " + e.getValue(), e.getValue().contains("tmp"));
    }
  }

  @AfterClass
  public static void tearDownAfterClass() throws Exception {
    TEST_UTIL.shutdownMiniMapReduceCluster();
    TEST_UTIL.shutdownMiniCluster();
  }

  /**
   * Create a table, then take an empty snapshot and a loaded snapshot of it for use by the
   * export tests.
   */
  @Before
  public void setUp() throws Exception {
    this.admin = TEST_UTIL.getAdmin();

    tableName = TableName.valueOf("testtb-" + testName.getMethodName());
    snapshotName = "snaptb0-" + testName.getMethodName();
    emptySnapshotName = "emptySnaptb0-" + testName.getMethodName();

    // Create Table
    SnapshotTestingUtils.createPreSplitTable(TEST_UTIL, tableName, 2, TestExportSnapshot.FAMILY);

    // Take an empty snapshot
    admin.snapshot(emptySnapshotName, tableName);

    // Add some rows
    SnapshotTestingUtils.loadData(TEST_UTIL, tableName, 50,
      TestExportSnapshot.FAMILY);
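    // The region count is used as the expected number of files in the exported snapshot
    // (one store file per region is assumed here).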
    tableNumFiles = admin.getRegions(tableName).size();

    // Take a snapshot
    admin.snapshot(snapshotName, tableName);
  }

  @After
  public void tearDown() throws Exception {
    TEST_UTIL.deleteTable(tableName);
    SnapshotTestingUtils.deleteAllSnapshots(TEST_UTIL.getAdmin());
    SnapshotTestingUtils.deleteArchiveDirectory(TEST_UTIL);
  }

  /**
   * Check that ExportSnapshot will succeed if something fails but the retry succeeds.
   */
  @Test
  public void testExportRetry() throws Exception {
    Path copyDir = TestExportSnapshot.getLocalDestinationDir(TEST_UTIL);
    Configuration conf = new Configuration(TEST_UTIL.getConfiguration());
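    // Inject two simulated failures; with three map attempts allowed, the final retry should
    // let the export job complete successfully.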
    conf.setBoolean(ExportSnapshot.Testing.CONF_TEST_FAILURE, true);
    conf.setInt(ExportSnapshot.Testing.CONF_TEST_FAILURE_COUNT, 2);
    conf.setInt("mapreduce.map.maxattempts", 3);
    TestExportSnapshot.testExportFileSystemState(conf, tableName,
      Bytes.toBytes(snapshotName), Bytes.toBytes(snapshotName),
      tableNumFiles, TEST_UTIL.getDefaultRootDirPath(), copyDir, true,
      null, true);
  }

  /**
   * Check that ExportSnapshot will fail if we inject failures more times than MR will retry.
   */
  @Test
  public void testExportFailure() throws Exception {
    Path copyDir = TestExportSnapshot.getLocalDestinationDir(TEST_UTIL);
    FileSystem fs = FileSystem.get(copyDir.toUri(), new Configuration());
    copyDir = copyDir.makeQualified(fs.getUri(), fs.getWorkingDirectory());
    Configuration conf = new Configuration(TEST_UTIL.getConfiguration());
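    // Inject four simulated failures, more than the three allowed map attempts, so the
    // export job must fail.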
    conf.setBoolean(ExportSnapshot.Testing.CONF_TEST_FAILURE, true);
    conf.setInt(ExportSnapshot.Testing.CONF_TEST_FAILURE_COUNT, 4);
    conf.setInt("mapreduce.map.maxattempts", 3);
    TestExportSnapshot.testExportFileSystemState(conf, tableName,
      Bytes.toBytes(snapshotName), Bytes.toBytes(snapshotName),
      tableNumFiles, TEST_UTIL.getDefaultRootDirPath(), copyDir, true, null, false);
  }
}