/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.master.cleaner;

import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertFalse;
import static org.junit.jupiter.api.Assertions.assertTrue;

import java.io.IOException;
import java.io.UncheckedIOException;
import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseTestingUtil;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.Server;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.keymeta.KeyManagementService;
import org.apache.hadoop.hbase.master.HMaster;
import org.apache.hadoop.hbase.replication.ReplicationException;
import org.apache.hadoop.hbase.replication.ReplicationFactory;
import org.apache.hadoop.hbase.replication.ReplicationPeerConfig;
import org.apache.hadoop.hbase.replication.ReplicationPeers;
import org.apache.hadoop.hbase.replication.ReplicationQueueStorage;
import org.apache.hadoop.hbase.replication.ReplicationStorageFactory;
import org.apache.hadoop.hbase.replication.SyncReplicationState;
import org.apache.hadoop.hbase.replication.master.ReplicationHFileCleaner;
import org.apache.hadoop.hbase.testclassification.MasterTests;
import org.apache.hadoop.hbase.testclassification.SmallTests;
import org.apache.hadoop.hbase.util.MockServer;
import org.apache.hadoop.hbase.util.Pair;
import org.apache.hadoop.hbase.zookeeper.ZKWatcher;
import org.junit.jupiter.api.AfterAll;
import org.junit.jupiter.api.AfterEach;
import org.junit.jupiter.api.BeforeAll;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Tag;
import org.junit.jupiter.api.Test;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import org.apache.hbase.thirdparty.com.google.common.collect.ImmutableMap;

/**
 * Tests for {@link ReplicationHFileCleaner}: files that have an hfile reference node in the
 * replication queue must be retained, while unreferenced files are deletable.
 */
@Tag(MasterTests.TAG)
@Tag(SmallTests.TAG)
public class TestReplicationHFileCleaner {

  private static final Logger LOG = LoggerFactory.getLogger(TestReplicationHFileCleaner.class);
  private final static HBaseTestingUtil TEST_UTIL = new HBaseTestingUtil();
  private static Server server;
  private static final TableName tableName = TableName.valueOf("test_cleaner");
  private static ReplicationQueueStorage rq;
  private static ReplicationPeers rp;
  private static final String peerId = "TestReplicationHFileCleaner";
  private static Configuration conf = TEST_UTIL.getConfiguration();
  private static FileSystem fs = null;
  // Init params the cleaner needs: a handle on the (mock) master server.
  private static Map<String, Object> params;
  // Per-test scratch directory on the test filesystem; removed in cleanup().
  private Path root;

  @BeforeAll
  public static void setUpBeforeClass() throws Exception {
    TEST_UTIL.startMiniCluster();
    server = new DummyServer();
    params = ImmutableMap.of(HMaster.MASTER, server);
    // Bulk-load replication must be on for hfile references to be tracked.
    conf.setBoolean(HConstants.REPLICATION_BULKLOAD_ENABLE_KEY, true);
    HMaster.decorateMasterConfiguration(conf);
    TableDescriptor td = ReplicationStorageFactory.createReplicationQueueTableDescriptor(tableName);
    TEST_UTIL.getAdmin().createTable(td);
    conf.set(ReplicationStorageFactory.REPLICATION_QUEUE_TABLE_NAME, tableName.getNameAsString());
    rp =
      ReplicationFactory.getReplicationPeers(server.getFileSystem(), server.getZooKeeper(), conf);
    rp.init();
    rq = ReplicationStorageFactory.getReplicationQueueStorage(server.getConnection(), conf);
    fs = FileSystem.get(conf);
  }

  @AfterAll
  public static void tearDownAfterClass() throws Exception {
    TEST_UTIL.shutdownMiniCluster();
  }

  @BeforeEach
  public void setup() throws ReplicationException, IOException {
    root = TEST_UTIL.getDataTestDirOnTestFS();
    // Each test needs a registered peer so hfile refs can be queued against it.
    rp.getPeerStorage().addPeer(peerId,
      ReplicationPeerConfig.newBuilder().setClusterKey(TEST_UTIL.getRpcConnnectionURI()).build(),
      true, SyncReplicationState.NONE);
  }

  @AfterEach
  public void cleanup() throws ReplicationException {
    try {
      fs.delete(root, true);
    } catch (IOException e) {
      // Best-effort cleanup; log with the cause so failures are diagnosable.
      LOG.warn("Failed to delete files recursively from path {}", root, e);
    }
    // Remove all HFileRefs (if any)
    rq.removeHFileRefs(peerId, rq.getReplicableHFiles(peerId));
    rp.getPeerStorage().removePeer(peerId);
  }

  /** Builds a cleaner wired to the test configuration and mock master params. */
  private ReplicationHFileCleaner createCleaner() {
    ReplicationHFileCleaner cleaner = new ReplicationHFileCleaner();
    cleaner.setConf(conf);
    cleaner.init(params);
    return cleaner;
  }

  @Test
  public void testIsFileDeletable() throws IOException, ReplicationException {
    // 1. Create a file
    Path file = new Path(root, "testIsFileDeletableWithNoHFileRefs");
    fs.createNewFile(file);
    // 2. Assert file is successfully created
    assertTrue(fs.exists(file), "Test file not created!");
    ReplicationHFileCleaner cleaner = createCleaner();
    // 3. Assert that file as is should be deletable
    assertTrue(cleaner.isFileDeletable(fs.getFileStatus(file)),
      "Cleaner should allow to delete this file as there is no hfile reference node "
        + "for it in the queue.");

    List<Pair<Path, Path>> files = new ArrayList<>(1);
    files.add(new Pair<>(null, file));
    // 4. Add the file to hfile-refs queue
    rq.addHFileRefs(peerId, files);
    // 5. Assert file should not be deletable
    assertFalse(cleaner.isFileDeletable(fs.getFileStatus(file)),
      "Cleaner should not allow to delete this file as there is a hfile reference node "
        + "for it in the queue.");
  }

  @Test
  public void testGetDeletableFiles() throws Exception {
    // 1. Create two files and assert that they are successfully created
    Path notDeletablefile = new Path(root, "testGetDeletableFiles_1");
    fs.createNewFile(notDeletablefile);
    assertTrue(fs.exists(notDeletablefile), "Test file not created!");
    Path deletablefile = new Path(root, "testGetDeletableFiles_2");
    fs.createNewFile(deletablefile);
    assertTrue(fs.exists(deletablefile), "Test file not created!");

    List<FileStatus> files = new ArrayList<>(2);
    FileStatus f = new FileStatus();
    f.setPath(deletablefile);
    files.add(f);
    f = new FileStatus();
    f.setPath(notDeletablefile);
    files.add(f);

    List<Pair<Path, Path>> hfiles = new ArrayList<>(1);
    hfiles.add(new Pair<>(null, notDeletablefile));
    // 2. Add one file to hfile-refs queue
    rq.addHFileRefs(peerId, hfiles);

    ReplicationHFileCleaner cleaner = createCleaner();
    // 3. Materialize the deletable files so we can assert on exact contents. (The previous
    // version of this test only called hasNext() in a counting loop without ever consuming
    // the iterator, leaving the follow-up fail() check unreachable.)
    List<Path> deletablePaths = new ArrayList<>();
    for (FileStatus deletable : cleaner.getDeletableFiles(files)) {
      deletablePaths.add(deletable.getPath());
    }
    // 4. Exactly the unreferenced file should be deletable; the referenced one must be kept.
    assertEquals(1, deletablePaths.size(),
      "Exactly one of the two files should be deletable but got: " + deletablePaths);
    assertTrue(deletablePaths.contains(deletablefile),
      "File " + deletablefile + " should be deletable as it has no hfile reference node.");
    assertFalse(deletablePaths.contains(notDeletablefile),
      "File " + notDeletablefile
        + " should not be deletable as its hfile reference node is added.");
  }

  /**
   * Minimal {@link Server} implementation backed by the shared test util; wraps checked
   * {@link IOException}s as unchecked since the mocked accessors cannot declare them.
   */
  static class DummyServer extends MockServer {

    @Override
    public Configuration getConfiguration() {
      return TEST_UTIL.getConfiguration();
    }

    @Override
    public ZKWatcher getZooKeeper() {
      try {
        return TEST_UTIL.getZooKeeperWatcher();
      } catch (IOException e) {
        throw new UncheckedIOException(e);
      }
    }

    @Override
    public Connection getConnection() {
      try {
        return TEST_UTIL.getConnection();
      } catch (IOException e) {
        throw new UncheckedIOException(e);
      }
    }

    @Override
    public FileSystem getFileSystem() {
      try {
        return TEST_UTIL.getTestFileSystem();
      } catch (IOException e) {
        throw new UncheckedIOException(e);
      }
    }

    @Override
    public KeyManagementService getKeyManagementService() {
      // No key management in this test context.
      return null;
    }
  }
}