/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.master.cleaner;

import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertTrue;

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.ChoreService;
import org.apache.hadoop.hbase.CoordinatedStateManager;
import org.apache.hadoop.hbase.HBaseClassTestRule;
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.Server;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ClusterConnection;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.RegionInfo;
import org.apache.hadoop.hbase.client.RegionInfoBuilder;
import org.apache.hadoop.hbase.io.HFileLink;
import org.apache.hadoop.hbase.testclassification.MasterTests;
import org.apache.hadoop.hbase.testclassification.MediumTests;
import org.apache.hadoop.hbase.util.CommonFSUtils;
import org.apache.hadoop.hbase.util.HFileArchiveUtil;
import org.apache.hadoop.hbase.zookeeper.ZKWatcher;
import org.junit.AfterClass;
import org.junit.BeforeClass;
import org.junit.ClassRule;
import org.junit.Rule;
import org.junit.Test;
import org.junit.experimental.categories.Category;
import org.junit.rules.TestName;

/**
 * Test the HFileLink Cleaner. HFiles that are referenced by links cannot be deleted while a link
 * to them still exists.
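 * The test creates an hfile in one table's archive, links to it from a second table, and checks
 * that the cleaner chore only removes the back reference and the hfile once the link table has
 * been archived and the TTL has expired.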
 */
@Category({ MasterTests.class, MediumTests.class })
public class TestHFileLinkCleaner {

  @ClassRule
  public static final HBaseClassTestRule CLASS_RULE =
    HBaseClassTestRule.forClass(TestHFileLinkCleaner.class);

  private final static HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();

  private static DirScanPool POOL;

  @Rule
  public TestName name = new TestName();

  @BeforeClass
  public static void setUp() {
    POOL = new DirScanPool(TEST_UTIL.getConfiguration());
  }

  @AfterClass
  public static void tearDown() {
    POOL.shutdownNow();
  }

  @Test
  public void testHFileLinkCleaning() throws Exception {
    Configuration conf = TEST_UTIL.getConfiguration();
    CommonFSUtils.setRootDir(conf, TEST_UTIL.getDataTestDir());
    conf.set(HFileCleaner.MASTER_HFILE_CLEANER_PLUGINS, HFileLinkCleaner.class.getName());
    Path rootDir = CommonFSUtils.getRootDir(conf);
    FileSystem fs = FileSystem.get(conf);

    final TableName tableName = TableName.valueOf(name.getMethodName());
    final TableName tableLinkName = TableName.valueOf(name.getMethodName() + "-link");
    final String hfileName = "1234567890";
    final String familyName = "cf";

    RegionInfo hri = RegionInfoBuilder.newBuilder(tableName).build();
    RegionInfo hriLink = RegionInfoBuilder.newBuilder(tableLinkName).build();

    Path archiveDir = HFileArchiveUtil.getArchivePath(conf);
    Path archiveStoreDir = HFileArchiveUtil.getStoreArchivePath(conf,
      tableName, hri.getEncodedName(), familyName);

    // Create an hfile in the archive store dir: <archive>/<table>/<region>/<cf>/<hfileName>
    Path familyPath = getFamilyDirPath(archiveDir, tableName, hri.getEncodedName(), familyName);
    fs.mkdirs(familyPath);
    Path hfilePath = new Path(familyPath, hfileName);
    fs.createNewFile(hfilePath);

    // Create link to hfile
    Path familyLinkPath =
      getFamilyDirPath(rootDir, tableLinkName, hriLink.getEncodedName(), familyName);
    fs.mkdirs(familyLinkPath);
    HFileLink.create(conf, fs, familyLinkPath, hri, hfileName);
    Path linkBackRefDir = HFileLink.getBackReferencesDir(archiveStoreDir, hfileName);
    assertTrue(fs.exists(linkBackRefDir));
    FileStatus[] backRefs = fs.listStatus(linkBackRefDir);
    assertEquals(1, backRefs.length);
    Path linkBackRef = backRefs[0].getPath();

    // Initialize cleaner
    final long ttl = 1000;
    conf.setLong(TimeToLiveHFileCleaner.TTL_CONF_KEY, ttl);
    Server server = new DummyServer();
    HFileCleaner cleaner = new HFileCleaner(1000, server, conf, fs, archiveDir, POOL);

    // Link backref cannot be removed
    cleaner.chore();
    assertTrue(fs.exists(linkBackRef));
    assertTrue(fs.exists(hfilePath));

    // Link backref can be removed
    fs.rename(CommonFSUtils.getTableDir(rootDir, tableLinkName),
      CommonFSUtils.getTableDir(archiveDir, tableLinkName));
    cleaner.chore();
    assertFalse("Link should be deleted", fs.exists(linkBackRef));

    // HFile can be removed
    Thread.sleep(ttl * 2);
    cleaner.chore();
    assertFalse("HFile should be deleted", fs.exists(hfilePath));

    // Remove everything
    for (int i = 0; i < 4; ++i) {
      Thread.sleep(ttl * 2);
      cleaner.chore();
    }
    assertFalse("HFile should be deleted",
      fs.exists(CommonFSUtils.getTableDir(archiveDir, tableName)));
    assertFalse("Link should be deleted",
      fs.exists(CommonFSUtils.getTableDir(archiveDir, tableLinkName)));
  }

  private static Path getFamilyDirPath(final Path rootDir, final TableName table,
      final String region, final String family) {
    return new Path(new Path(CommonFSUtils.getTableDir(rootDir, table), region), family);
  }

  static class DummyServer implements Server {

    @Override
    public Configuration getConfiguration() {
      return TEST_UTIL.getConfiguration();
    }

    @Override
    public ZKWatcher getZooKeeper() {
      try {
        return new ZKWatcher(getConfiguration(), "dummy server", this);
      } catch (IOException e) {
        e.printStackTrace();
      }
      return null;
    }

    @Override
    public CoordinatedStateManager getCoordinatedStateManager() {
      return null;
    }

    @Override
    public ClusterConnection getConnection() {
      return null;
    }

    @Override
    public ServerName getServerName() {
      return ServerName.valueOf("regionserver,60020,000000");
    }

    @Override
    public void abort(String why, Throwable e) {}

    @Override
    public boolean isAborted() {
      return false;
    }

    @Override
    public void stop(String why) {}

    @Override
    public boolean isStopped() {
      return false;
    }

    @Override
    public ChoreService getChoreService() {
      return null;
    }

    @Override
    public ClusterConnection getClusterConnection() {
      return null;
    }

    @Override
    public FileSystem getFileSystem() {
      return null;
    }

    @Override
    public boolean isStopping() {
      return false;
    }

    @Override
    public Connection createConnection(Configuration conf) throws IOException {
      return null;
    }
  }
}