/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.master.cleaner;

import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertTrue;

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.ChoreService;
import org.apache.hadoop.hbase.CoordinatedStateManager;
import org.apache.hadoop.hbase.HBaseClassTestRule;
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.Server;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ClusterConnection;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.io.HFileLink;
import org.apache.hadoop.hbase.testclassification.MasterTests;
import org.apache.hadoop.hbase.testclassification.MediumTests;
import org.apache.hadoop.hbase.util.FSUtils;
import org.apache.hadoop.hbase.util.HFileArchiveUtil;
import org.apache.hadoop.hbase.zookeeper.MetaTableLocator;
import org.apache.hadoop.hbase.zookeeper.ZKWatcher;
import org.junit.ClassRule;
import org.junit.Rule;
import org.junit.Test;
import org.junit.experimental.categories.Category;
import org.junit.rules.TestName;

/**
 * Test the HFileLink Cleaner.
 * HFiles with links cannot be deleted while a link (back-reference) to them is still present.
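 * Only once the referencing table is gone may the back-reference, and then (after the TTL
 * expires) the archived hfile itself, be removed by the cleaner chore.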
 */
@Category({MasterTests.class, MediumTests.class})
public class TestHFileLinkCleaner {

  @ClassRule
  public static final HBaseClassTestRule CLASS_RULE =
      HBaseClassTestRule.forClass(TestHFileLinkCleaner.class);

  private final static HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();

  @Rule
  public TestName name = new TestName();

  @Test
  public void testHFileLinkCleaning() throws Exception {
    Configuration conf = TEST_UTIL.getConfiguration();
    FSUtils.setRootDir(conf, TEST_UTIL.getDataTestDir());
    conf.set(HFileCleaner.MASTER_HFILE_CLEANER_PLUGINS, HFileLinkCleaner.class.getName());
    Path rootDir = FSUtils.getRootDir(conf);
    FileSystem fs = FileSystem.get(conf);

    final TableName tableName = TableName.valueOf(name.getMethodName());
    final TableName tableLinkName = TableName.valueOf(name.getMethodName() + "-link");
    final String hfileName = "1234567890";
    final String familyName = "cf";

    HRegionInfo hri = new HRegionInfo(tableName);
    HRegionInfo hriLink = new HRegionInfo(tableLinkName);

    Path archiveDir = HFileArchiveUtil.getArchivePath(conf);
    Path archiveStoreDir = HFileArchiveUtil.getStoreArchivePath(conf,
        tableName, hri.getEncodedName(), familyName);
    Path archiveLinkStoreDir = HFileArchiveUtil.getStoreArchivePath(conf,
        tableLinkName, hriLink.getEncodedName(), familyName);

    // Create the hfile in the archive: <archive>/<tableName>/<encoded-region-name>/<cf>/<hfileName>
    Path familyPath = getFamilyDirPath(archiveDir, tableName, hri.getEncodedName(), familyName);
    fs.mkdirs(familyPath);
    Path hfilePath = new Path(familyPath, hfileName);
    fs.createNewFile(hfilePath);

    // Create a link to the hfile from the "-link" table; this also writes a back-reference
    // next to the archived hfile.
    Path familyLinkPath = getFamilyDirPath(rootDir, tableLinkName,
        hriLink.getEncodedName(), familyName);
    fs.mkdirs(familyLinkPath);
    HFileLink.create(conf, fs, familyLinkPath, hri, hfileName);
    Path linkBackRefDir = HFileLink.getBackReferencesDir(archiveStoreDir, hfileName);
    assertTrue(fs.exists(linkBackRefDir));
    FileStatus[] backRefs = fs.listStatus(linkBackRefDir);
    assertEquals(1, backRefs.length);
    Path linkBackRef = backRefs[0].getPath();

    // Initialize the cleaner with a 1000 ms chore period and a short hfile TTL.
    final long ttl = 1000;
    conf.setLong(TimeToLiveHFileCleaner.TTL_CONF_KEY, ttl);
    Server server = new DummyServer();
    HFileCleaner cleaner = new HFileCleaner(1000, server, conf, fs, archiveDir);

    // The link back-reference cannot be removed while the link table still exists.
    cleaner.chore();
    assertTrue(fs.exists(linkBackRef));
    assertTrue(fs.exists(hfilePath));

    // After the link table is moved to the archive, the back-reference becomes removable.
    fs.rename(FSUtils.getTableDir(rootDir, tableLinkName),
        FSUtils.getTableDir(archiveDir, tableLinkName));
    cleaner.chore();
    assertFalse("Link should be deleted", fs.exists(linkBackRef));

    // With the back-reference gone, the hfile itself can be removed once the TTL has passed.
    Thread.sleep(ttl * 2);
    cleaner.chore();
    assertFalse("HFile should be deleted", fs.exists(hfilePath));

    // Run the chore a few more times so the remaining, now empty, directories are removed too.
    for (int i = 0; i < 4; ++i) {
      Thread.sleep(ttl * 2);
      cleaner.chore();
    }
    assertFalse("HFile should be deleted", fs.exists(FSUtils.getTableDir(archiveDir, tableName)));
    assertFalse("Link should be deleted",
        fs.exists(FSUtils.getTableDir(archiveDir, tableLinkName)));
  }

  private static Path getFamilyDirPath(final Path rootDir, final TableName table,
      final String region, final String family) {
    return new Path(new Path(FSUtils.getTableDir(rootDir, table), region), family);
  }

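  /**
   * Minimal {@link Server} implementation used to drive the cleaner chore: it supplies the
   * test configuration and a ZooKeeper watcher on demand, while all other methods are no-ops
   * or return null.
   */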
  static class DummyServer implements Server {

    @Override
    public Configuration getConfiguration() {
      return TEST_UTIL.getConfiguration();
    }

    @Override
    public ZKWatcher getZooKeeper() {
      try {
        return new ZKWatcher(getConfiguration(), "dummy server", this);
      } catch (IOException e) {
        e.printStackTrace();
      }
      return null;
    }

    @Override
    public CoordinatedStateManager getCoordinatedStateManager() {
      return null;
    }

    @Override
    public ClusterConnection getConnection() {
      return null;
    }

    @Override
    public MetaTableLocator getMetaTableLocator() {
      return null;
    }

    @Override
    public ServerName getServerName() {
      return ServerName.valueOf("regionserver,60020,000000");
    }

    @Override
    public void abort(String why, Throwable e) {}

    @Override
    public boolean isAborted() {
      return false;
    }

    @Override
    public void stop(String why) {}

    @Override
    public boolean isStopped() {
      return false;
    }

    @Override
    public ChoreService getChoreService() {
      return null;
    }

    @Override
    public ClusterConnection getClusterConnection() {
      return null;
    }

    @Override
    public FileSystem getFileSystem() {
      return null;
    }

    @Override
    public boolean isStopping() {
      return false;
    }

    @Override
    public Connection createConnection(Configuration conf) throws IOException {
      return null;
    }
  }
}