/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.io.asyncfs;

import java.io.File;
import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseCommonTestingUtil;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

/**
 * Base class for tests that need a {@link MiniDFSCluster}. Manages a per-test-run data
 * directory and redirects the various Hadoop/HDFS scratch-dir properties into it so the
 * cluster never writes under {@code /tmp}.
 */
public abstract class AsyncFSTestBase {

  private static final Logger LOG = LoggerFactory.getLogger(AsyncFSTestBase.class);

  protected static final HBaseCommonTestingUtil UTIL = new HBaseCommonTestingUtil();

  /** Root data directory for the current mini cluster; set by {@link #setupClusterTestDir()}. */
  protected static File CLUSTER_TEST_DIR;

  /** The running mini DFS cluster, or {@code null} when none has been started. */
  protected static MiniDFSCluster CLUSTER;

  /**
   * Whether the cluster test directory should be removed when the JVM exits.
   * @return {@code true} (the default) unless the {@code hbase.testing.preserve.testdir}
   *         system property is set to {@code true}.
   */
  private static boolean deleteOnExit() {
    String v = System.getProperty("hbase.testing.preserve.testdir");
    // Let default be true, to delete on exit.
    return v == null ? true : !Boolean.parseBoolean(v);
  }

  /**
   * Creates a directory for the cluster, under the test data directory, and records it in
   * {@link #CLUSTER_TEST_DIR}.
   */
  protected static void setupClusterTestDir() {
    // Using randomUUID ensures that multiple clusters can be launched by
    // a same test, if it stops & starts them
    Path testDir =
      UTIL.getDataTestDir("cluster_" + HBaseCommonTestingUtil.getRandomUUID().toString());
    CLUSTER_TEST_DIR = new File(testDir.toString()).getAbsoluteFile();
    // Have it cleaned up on exit
    boolean b = deleteOnExit();
    if (b) {
      CLUSTER_TEST_DIR.deleteOnExit();
    }
    LOG.info("Created new mini-cluster data directory: {}, deleteOnExit={}", CLUSTER_TEST_DIR, b);
  }

  /**
   * Creates a sub-directory of the test data dir named after {@code property} and publishes
   * it as both a system property and an HBase configuration entry.
   */
  private static String createDirAndSetProperty(final String property) {
    return createDirAndSetProperty(property, property);
  }

  /**
   * Creates the directory {@code relPath} under the test data dir and sets {@code property}
   * to its path in both system properties and the HBase configuration.
   * @param relPath  path relative to the test data directory
   * @param property system/configuration property name to set
   * @return the absolute path that was created and published
   */
  private static String createDirAndSetProperty(final String relPath, String property) {
    String path = UTIL.getDataTestDir(relPath).toString();
    System.setProperty(property, path);
    UTIL.getConfiguration().set(property, path);
    File dir = new File(path);
    // mkdirs returns false when it creates nothing; only warn if the dir truly isn't there
    // (it may already exist from a prior call in the same test run).
    if (!dir.mkdirs() && !dir.isDirectory()) {
      LOG.warn("Failed to create directory {}", path);
    }
    LOG.info("Setting {} to {} in system properties and HBase conf", property, path);
    return path;
  }

  /**
   * Points every scratch-directory property Hadoop/HDFS might consult at sub-directories of
   * the cluster test dir, and disables the HDFS metrics loggers.
   */
  private static void createDirsAndSetProperties() throws IOException {
    setupClusterTestDir();
    System.setProperty("test.build.data", CLUSTER_TEST_DIR.getPath());
    createDirAndSetProperty("test.cache.data");
    createDirAndSetProperty("hadoop.tmp.dir");

    // Frustrate yarn's and hdfs's attempts at writing /tmp.
    // Below is fragile. Make it so we just interpolate any 'tmp' reference.
    createDirAndSetProperty("dfs.journalnode.edits.dir");
    createDirAndSetProperty("dfs.datanode.shared.file.descriptor.paths");
    createDirAndSetProperty("nfs.dump.dir");
    createDirAndSetProperty("java.io.tmpdir");
    createDirAndSetProperty("dfs.provided.aliasmap.inmemory.leveldb.dir");
    createDirAndSetProperty("fs.s3a.committer.staging.tmp.path");

    // disable metrics logger since it depend on commons-logging internal classes and we do not want
    // commons-logging on our classpath
    UTIL.getConfiguration().setInt(DFSConfigKeys.DFS_NAMENODE_METRICS_LOGGER_PERIOD_SECONDS_KEY, 0);
    UTIL.getConfiguration().setInt(DFSConfigKeys.DFS_DATANODE_METRICS_LOGGER_PERIOD_SECONDS_KEY, 0);
  }

  /**
   * Starts a mini DFS cluster with the given number of datanodes and waits until it is up.
   * @param servers number of datanodes to start
   * @throws IllegalStateException if a cluster is already running
   * @throws IOException if the cluster fails to start
   */
  protected static void startMiniDFSCluster(int servers) throws IOException {
    if (CLUSTER != null) {
      throw new IllegalStateException("Already started");
    }
    createDirsAndSetProperties();

    Configuration conf = UTIL.getConfiguration();

    CLUSTER = new MiniDFSCluster.Builder(conf).numDataNodes(servers).build();
    CLUSTER.waitClusterUp();
  }

  /** Shuts down the mini DFS cluster, if one is running, and clears {@link #CLUSTER}. */
  protected static void shutdownMiniDFSCluster() {
    if (CLUSTER != null) {
      CLUSTER.shutdown(true);
      CLUSTER = null;
    }
  }
}