/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.io.asyncfs;

import java.io.File;
import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseCommonTestingUtil;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

/**
 * Base class for tests that need a {@link MiniDFSCluster}. Redirects all cluster and temp
 * directories into the test data directory so that nothing leaks into /tmp.
 */
public abstract class AsyncFSTestBase {

  private static final Logger LOG = LoggerFactory.getLogger(AsyncFSTestBase.class);

  protected static final HBaseCommonTestingUtil UTIL = new HBaseCommonTestingUtil();

  protected static File CLUSTER_TEST_DIR;

  protected static MiniDFSCluster CLUSTER;

  /**
   * Whether to delete the cluster test directory on JVM exit. Defaults to true; set the system
   * property "hbase.testing.preserve.testdir" to true to keep the directory for debugging.
   */
  private static boolean deleteOnExit() {
    String v = System.getProperty("hbase.testing.preserve.testdir");
    // Default is true, i.e. delete on exit.
    return v == null || !Boolean.parseBoolean(v);
  }

  /**
   * Creates a directory for the cluster under the test data directory.
   */
  protected static void setupClusterTestDir() {
    // Using a random UUID ensures that multiple clusters can be launched by the same test if it
    // stops and starts them.
    Path testDir = UTIL.getDataTestDir("cluster_" + HBaseCommonTestingUtil.getRandomUUID());
    CLUSTER_TEST_DIR = new File(testDir.toString()).getAbsoluteFile();
    // Have it cleaned up on exit
    boolean b = deleteOnExit();
    if (b) {
      CLUSTER_TEST_DIR.deleteOnExit();
    }
    LOG.info("Created new mini-cluster data directory: {}, deleteOnExit={}", CLUSTER_TEST_DIR, b);
  }

  private static String createDirAndSetProperty(final String property) {
    return createDirAndSetProperty(property, property);
  }

  /**
   * Creates {@code relPath} under the test data directory and points both the system property
   * and the HBase configuration entry named {@code property} at it.
   */
  private static String createDirAndSetProperty(final String relPath, String property) {
    String path = UTIL.getDataTestDir(relPath).toString();
    System.setProperty(property, path);
    UTIL.getConfiguration().set(property, path);
    new File(path).mkdirs();
    LOG.info("Setting {} to {} in system properties and HBase conf", property, path);
    return path;
  }

  private static void createDirsAndSetProperties() throws IOException {
    setupClusterTestDir();
    System.setProperty("test.build.data", CLUSTER_TEST_DIR.getPath());
    createDirAndSetProperty("test.cache.data");
    createDirAndSetProperty("hadoop.tmp.dir");

    // Frustrate YARN's and HDFS's attempts at writing /tmp.
    // Below is fragile. Make it so we just interpolate any 'tmp' reference.
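    // Each key below otherwise defaults to a location under /tmp or the JVM temp dir; point
    // them all at per-test directories instead.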
    createDirAndSetProperty("dfs.journalnode.edits.dir");
    createDirAndSetProperty("dfs.datanode.shared.file.descriptor.paths");
    createDirAndSetProperty("nfs.dump.dir");
    createDirAndSetProperty("java.io.tmpdir");
    createDirAndSetProperty("dfs.provided.aliasmap.inmemory.leveldb.dir");
    createDirAndSetProperty("fs.s3a.committer.staging.tmp.path");
  }

  /**
   * Starts a {@link MiniDFSCluster} with the given number of DataNodes and waits for it to come
   * up. Throws if a cluster is already running.
   */
  protected static void startMiniDFSCluster(int servers) throws IOException {
    if (CLUSTER != null) {
      throw new IllegalStateException("Already started");
    }
    createDirsAndSetProperties();

    Configuration conf = UTIL.getConfiguration();

    CLUSTER = new MiniDFSCluster.Builder(conf).numDataNodes(servers).build();
    CLUSTER.waitClusterUp();
  }

  protected static void shutdownMiniDFSCluster() {
    if (CLUSTER != null) {
      // true: also delete the DFS data directories.
      CLUSTER.shutdown(true);
      CLUSTER = null;
    }
  }
}
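// A minimal usage sketch (hypothetical subclass, not part of this file): concrete tests extend
// AsyncFSTestBase and drive the cluster lifecycle from class-level JUnit hooks. Assumes JUnit 4
// on the classpath; the class and method names below are illustrative only.
//
//   public class TestExampleAsyncFS extends AsyncFSTestBase {
//
//     @org.junit.BeforeClass
//     public static void setUpCluster() throws Exception {
//       startMiniDFSCluster(3); // three DataNodes
//     }
//
//     @org.junit.AfterClass
//     public static void tearDownCluster() {
//       shutdownMiniDFSCluster();
//     }
//   }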