/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.master.cleaner;

import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertTrue;
import static org.mockito.Mockito.doThrow;
import static org.mockito.Mockito.spy;

import java.io.IOException;
import java.util.ArrayList;
import java.util.Iterator;
import java.util.List;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.Abortable;
import org.apache.hadoop.hbase.ChoreService;
import org.apache.hadoop.hbase.CoordinatedStateManager;
import org.apache.hadoop.hbase.HBaseClassTestRule;
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.Server;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.ZooKeeperConnectionException;
import org.apache.hadoop.hbase.client.ClusterConnection;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.master.HMaster;
import org.apache.hadoop.hbase.replication.ReplicationException;
import org.apache.hadoop.hbase.replication.ReplicationFactory;
import org.apache.hadoop.hbase.replication.ReplicationPeerConfig;
import org.apache.hadoop.hbase.replication.ReplicationPeers;
import org.apache.hadoop.hbase.replication.ReplicationQueueStorage;
import org.apache.hadoop.hbase.replication.ReplicationStorageFactory;
import org.apache.hadoop.hbase.replication.master.ReplicationHFileCleaner;
import org.apache.hadoop.hbase.testclassification.MasterTests;
import org.apache.hadoop.hbase.testclassification.SmallTests;
import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
import org.apache.hadoop.hbase.util.Pair;
import org.apache.hadoop.hbase.zookeeper.RecoverableZooKeeper;
import org.apache.hadoop.hbase.zookeeper.ZKWatcher;
import org.apache.zookeeper.KeeperException;
import org.apache.zookeeper.data.Stat;
import org.junit.After;
import org.junit.AfterClass;
import org.junit.Before;
import org.junit.BeforeClass;
import org.junit.ClassRule;
import org.junit.Test;
import org.junit.experimental.categories.Category;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import org.apache.hbase.thirdparty.com.google.common.collect.Lists;

@Category({ MasterTests.class, SmallTests.class })
public class TestReplicationHFileCleaner {

  @ClassRule
  public static final HBaseClassTestRule CLASS_RULE =
    HBaseClassTestRule.forClass(TestReplicationHFileCleaner.class);

  private static final Logger LOG = LoggerFactory.getLogger(TestReplicationHFileCleaner.class);
  private final static HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
  private static Server server;
  private static ReplicationQueueStorage rq;
  private static ReplicationPeers rp;
  private static final String peerId = "TestReplicationHFileCleaner";
  private static Configuration conf = TEST_UTIL.getConfiguration();
  static FileSystem fs = null;
  Path root;

  @BeforeClass
  public static void setUpBeforeClass() throws Exception {
    TEST_UTIL.startMiniZKCluster();
    server = new DummyServer();
    conf.setBoolean(HConstants.REPLICATION_BULKLOAD_ENABLE_KEY, true);
    HMaster.decorateMasterConfiguration(conf);
    rp = ReplicationFactory.getReplicationPeers(server.getZooKeeper(), conf);
    rp.init();
    rq = ReplicationStorageFactory.getReplicationQueueStorage(server.getZooKeeper(), conf);
    fs = FileSystem.get(conf);
  }

  @AfterClass
  public static void tearDownAfterClass() throws Exception {
    TEST_UTIL.shutdownMiniZKCluster();
  }

  @Before
  public void setup() throws ReplicationException, IOException {
    root = TEST_UTIL.getDataTestDirOnTestFS();
    rp.getPeerStorage().addPeer(peerId,
      ReplicationPeerConfig.newBuilder().setClusterKey(TEST_UTIL.getClusterKey()).build(), true);
    rq.addPeerToHFileRefs(peerId);
  }

  @After
  public void cleanup() throws ReplicationException {
    try {
      fs.delete(root, true);
    } catch (IOException e) {
      LOG.warn("Failed to delete files recursively from path " + root, e);
    }
    // Remove all HFileRefs (if any)
    rq.removeHFileRefs(peerId, rq.getReplicableHFiles(peerId));
    rp.getPeerStorage().removePeer(peerId);
  }
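
  /**
   * Verifies that a file with no hfile reference node in the replication queue is deletable, and
   * that the same file becomes non-deletable once a reference node is added for it.
   */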
  @Test
  public void testIsFileDeletable() throws IOException, ReplicationException {
    // 1. Create a file
    Path file = new Path(root, "testIsFileDeletableWithNoHFileRefs");
    fs.createNewFile(file);
    // 2. Assert file is successfully created
    assertTrue("Test file not created!", fs.exists(file));
    ReplicationHFileCleaner cleaner = new ReplicationHFileCleaner();
    cleaner.setConf(conf);
    // 3. Assert that the file as is should be deletable
    assertTrue("Cleaner should allow this file to be deleted as there is no hfile reference "
      + "node for it in the queue.", cleaner.isFileDeletable(fs.getFileStatus(file)));

    List<Pair<Path, Path>> files = new ArrayList<>(1);
    files.add(new Pair<>(null, file));
    // 4. Add the file to the hfile-refs queue
    rq.addHFileRefs(peerId, files);
    // 5. Assert that the file should no longer be deletable
    assertFalse("Cleaner should not allow this file to be deleted as there is an hfile reference "
      + "node for it in the queue.", cleaner.isFileDeletable(fs.getFileStatus(file)));
  }
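
  /**
   * Verifies that getDeletableFiles() returns only the files that have no hfile reference node in
   * the replication queue.
   */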
  @Test
  public void testGetDeletableFiles() throws Exception {
    // 1. Create two files and assert that they exist
    Path notDeletablefile = new Path(root, "testGetDeletableFiles_1");
    fs.createNewFile(notDeletablefile);
    assertTrue("Test file not created!", fs.exists(notDeletablefile));
    Path deletablefile = new Path(root, "testGetDeletableFiles_2");
    fs.createNewFile(deletablefile);
    assertTrue("Test file not created!", fs.exists(deletablefile));

    List<FileStatus> files = new ArrayList<>(2);
    FileStatus f = new FileStatus();
    f.setPath(deletablefile);
    files.add(f);
    f = new FileStatus();
    f.setPath(notDeletablefile);
    files.add(f);

    List<Pair<Path, Path>> hfiles = new ArrayList<>(1);
    hfiles.add(new Pair<>(null, notDeletablefile));
    // 2. Add only one of the files to the hfile-refs queue
    rq.addHFileRefs(peerId, hfiles);

    ReplicationHFileCleaner cleaner = new ReplicationHFileCleaner();
    cleaner.setConf(conf);
    // 3. Assert that only the file without an hfile reference node is returned as deletable
    Iterator<FileStatus> deletableFilesIterator = cleaner.getDeletableFiles(files).iterator();
    assertTrue("File " + deletablefile + " should be deletable as it has no hfile reference "
      + "node in the queue.", deletableFilesIterator.hasNext());
    assertEquals(deletablefile, deletableFilesIterator.next().getPath());
    assertFalse("File " + notDeletablefile + " should not be deletable as its hfile reference "
      + "node was added to the queue.", deletableFilesIterator.hasNext());
  }

  /**
   * ReplicationHFileCleaner should be able to ride over ZooKeeper errors without aborting.
   */
  @Test
  public void testZooKeeperAbort() throws Exception {
    ReplicationHFileCleaner cleaner = new ReplicationHFileCleaner();

    List<FileStatus> dummyFiles = Lists.newArrayList(
      new FileStatus(100, false, 3, 100, EnvironmentEdgeManager.currentTime(), new Path("hfile1")),
      new FileStatus(100, false, 3, 100, EnvironmentEdgeManager.currentTime(),
        new Path("hfile2")));

    FaultyZooKeeperWatcher faultyZK =
      new FaultyZooKeeperWatcher(conf, "testZooKeeperAbort-faulty", null);
    try {
      faultyZK.init();
      cleaner.setConf(conf, faultyZK);
      // Should keep all files, since a ConnectionLossException is thrown while reading the
      // hfile-refs znodes
      Iterable<FileStatus> toDelete = cleaner.getDeletableFiles(dummyFiles);
      assertFalse(toDelete.iterator().hasNext());
      assertFalse(cleaner.isStopped());
    } finally {
      faultyZK.close();
    }

    // When ZooKeeper is working, both files should be returned
    cleaner = new ReplicationHFileCleaner();
    ZKWatcher zkw = new ZKWatcher(conf, "testZooKeeperAbort-normal", null);
    try {
      cleaner.setConf(conf, zkw);
      Iterable<FileStatus> filesToDelete = cleaner.getDeletableFiles(dummyFiles);
      Iterator<FileStatus> iter = filesToDelete.iterator();
      assertTrue(iter.hasNext());
      assertEquals(new Path("hfile1"), iter.next().getPath());
      assertTrue(iter.hasNext());
      assertEquals(new Path("hfile2"), iter.next().getPath());
      assertFalse(iter.hasNext());
    } finally {
      zkw.close();
    }
  }

  static class DummyServer implements Server {

    @Override
    public Configuration getConfiguration() {
      return TEST_UTIL.getConfiguration();
    }

    @Override
    public ZKWatcher getZooKeeper() {
      try {
        return new ZKWatcher(getConfiguration(), "dummy server", this);
      } catch (IOException e) {
        e.printStackTrace();
      }
      return null;
    }

    @Override
    public CoordinatedStateManager getCoordinatedStateManager() {
      return null;
    }

    @Override
    public ClusterConnection getConnection() {
      return null;
    }

    @Override
    public ServerName getServerName() {
      return ServerName.valueOf("regionserver,60020,000000");
    }

    @Override
    public void abort(String why, Throwable e) {
    }

    @Override
    public boolean isAborted() {
      return false;
    }

    @Override
    public void stop(String why) {
    }

    @Override
    public boolean isStopped() {
      return false;
    }

    @Override
    public ChoreService getChoreService() {
      return null;
    }

    @Override
    public ClusterConnection getClusterConnection() {
      return null;
    }

    @Override
    public FileSystem getFileSystem() {
      return null;
    }

    @Override
    public boolean isStopping() {
      return false;
    }

    @Override
    public Connection createConnection(Configuration conf) throws IOException {
      return null;
    }
  }
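
  /**
   * A ZKWatcher whose RecoverableZooKeeper is replaced by a Mockito spy that throws a
   * ConnectionLossException on reads of the hfile-refs znode, simulating an unreliable
   * ZooKeeper connection.
   */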
  static class FaultyZooKeeperWatcher extends ZKWatcher {
    private RecoverableZooKeeper zk;

    public FaultyZooKeeperWatcher(Configuration conf, String identifier, Abortable abortable)
      throws ZooKeeperConnectionException, IOException {
      super(conf, identifier, abortable);
    }

    public void init() throws Exception {
      this.zk = spy(super.getRecoverableZooKeeper());
      doThrow(new KeeperException.ConnectionLossException()).when(zk)
        .getData("/hbase/replication/hfile-refs", null, new Stat());
    }

    @Override
    public RecoverableZooKeeper getRecoverableZooKeeper() {
      return zk;
    }
  }
}