/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.client;

import java.io.IOException;
import java.util.UUID;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseClassTestRule;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.master.snapshot.SnapshotManager;
import org.apache.hadoop.hbase.regionserver.ConstantSizeRegionSplitPolicy;
import org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils;
import org.apache.hadoop.hbase.testclassification.LargeTests;
import org.junit.BeforeClass;
import org.junit.ClassRule;
import org.junit.experimental.categories.Category;

/**
 * This class tests that the use of a temporary snapshot directory supports snapshot functionality
 * while the temporary directory is on the same file system as the root directory.
 * <p>
 * This is an end-to-end test for the snapshot utility.
 */
@Category(LargeTests.class)
public class TestSnapshotDFSTemporaryDirectory extends TestSnapshotTemporaryDirectory {

  @ClassRule
  public static final HBaseClassTestRule CLASS_RULE =
    HBaseClassTestRule.forClass(TestSnapshotDFSTemporaryDirectory.class);

  /**
   * Setup the config for the cluster
   * @throws Exception on failure
   */
  @BeforeClass
  public static void setupCluster() throws Exception {
    setupConf(UTIL.getConfiguration());
    UTIL.startMiniCluster(NUM_RS);
    admin = UTIL.getHBaseAdmin();
  }

  private static void setupConf(Configuration conf) throws IOException {
    // disable the ui
    conf.setInt("hbase.regionserver.info.port", -1);
    // change the flush size to a small amount, regulating number of store files
    conf.setInt("hbase.hregion.memstore.flush.size", 25000);
    // so make sure we get a compaction when doing a load, but keep around some
    // files in the store
    conf.setInt("hbase.hstore.compaction.min", 10);
    conf.setInt("hbase.hstore.compactionThreshold", 10);
    // block writes if we get to 12 store files
    conf.setInt("hbase.hstore.blockingStoreFiles", 12);
    // Enable snapshot
    conf.setBoolean(SnapshotManager.HBASE_SNAPSHOT_ENABLED, true);
    conf.set(HConstants.HBASE_REGION_SPLIT_POLICY_KEY,
      ConstantSizeRegionSplitPolicy.class.getName());

    // Point the snapshot working directory at a random .tmpdir derived from the default root dir
    String snapshotPath = UTIL.getDefaultRootDirPath().toString() + Path.SEPARATOR
      + UUID.randomUUID().toString() + Path.SEPARATOR + ".tmpdir" + Path.SEPARATOR;
    conf.set(SnapshotDescriptionUtils.SNAPSHOT_WORKING_DIR,
      "file://" + new Path(snapshotPath).toUri());
  }
}