/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.client;

import java.io.IOException;
import java.util.UUID;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseClassTestRule;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.master.snapshot.SnapshotManager;
import org.apache.hadoop.hbase.regionserver.ConstantSizeRegionSplitPolicy;
import org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils;
import org.apache.hadoop.hbase.testclassification.LargeTests;
import org.junit.BeforeClass;
import org.junit.ClassRule;
import org.junit.experimental.categories.Category;

/**
 * This class tests that the use of a temporary snapshot directory supports snapshot functionality
 * while the temporary directory is on the same file system as the root directory.
 * <p>
 * This is an end-to-end test for the snapshot utility.
 */
@Category(LargeTests.class)
public class TestSnapshotDFSTemporaryDirectory extends TestSnapshotTemporaryDirectory {

  @ClassRule
  public static final HBaseClassTestRule CLASS_RULE =
    HBaseClassTestRule.forClass(TestSnapshotDFSTemporaryDirectory.class);

  /**
   * Set up the config for the cluster.
   * @throws Exception on failure
   */
  @BeforeClass
  public static void setupCluster() throws Exception {
    setupConf(UTIL.getConfiguration());
    UTIL.startMiniCluster(NUM_RS);
    admin = UTIL.getAdmin();
  }

  private static void setupConf(Configuration conf) throws IOException {
    // disable the ui
    conf.setInt("hbase.regionserver.info.port", -1);
    // change the flush size to a small amount, regulating number of store files
    conf.setInt("hbase.hregion.memstore.flush.size", 25000);
    // make sure we get a compaction when doing a load, but keep around some
    // files in the store
    conf.setInt("hbase.hstore.compaction.min", 10);
    conf.setInt("hbase.hstore.compactionThreshold", 10);
    // block writes if we get to 12 store files
    conf.setInt("hbase.hstore.blockingStoreFiles", 12);
    // Enable snapshots
    conf.setBoolean(SnapshotManager.HBASE_SNAPSHOT_ENABLED, true);
    conf.set(HConstants.HBASE_REGION_SPLIT_POLICY_KEY,
      ConstantSizeRegionSplitPolicy.class.getName());

    // Put the snapshot working directory under a unique path beneath the default root dir
    String snapshotPath = UTIL.getDefaultRootDirPath().toString() + Path.SEPARATOR
      + UUID.randomUUID().toString() + Path.SEPARATOR + ".tmpdir" + Path.SEPARATOR;
    conf.set(SnapshotDescriptionUtils.SNAPSHOT_WORKING_DIR,
      "file://" + new Path(snapshotPath).toUri());
  }
}