/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.snapshot;

import java.io.IOException;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import java.util.concurrent.atomic.AtomicBoolean;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseClassTestRule;
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.TestTableName;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.master.HMaster;
import org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner;
import org.apache.hadoop.hbase.master.snapshot.SnapshotManager;
import org.apache.hadoop.hbase.testclassification.LargeTests;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.FSUtils;
import org.apache.hadoop.hbase.util.FSVisitor;
import org.junit.AfterClass;
import org.junit.Assert;
import org.junit.BeforeClass;
import org.junit.ClassRule;
import org.junit.Rule;
import org.junit.Test;
import org.junit.experimental.categories.Category;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import org.apache.hbase.thirdparty.com.google.common.collect.ImmutableMap;
import org.apache.hbase.thirdparty.com.google.common.collect.Lists;

/**
 * Test case for HBASE-21387: verifies that HFiles referenced by a snapshot that is still in
 * progress are never reported as deletable by the SnapshotHFileCleaner.
 */
@Category({ LargeTests.class })
public class TestSnapshotWhenChoreCleaning {

  @ClassRule
  public static final HBaseClassTestRule CLASS_RULE =
    HBaseClassTestRule.forClass(TestSnapshotWhenChoreCleaning.class);

  private static final HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
  private static final Configuration CONF = TEST_UTIL.getConfiguration();
  private static final Logger LOG = LoggerFactory.getLogger(TestSnapshotWhenChoreCleaning.class);
  private static final TableName TABLE_NAME = TableName.valueOf("testTable");
  private static final int MAX_SPLIT_KEYS_NUM = 100;
  private static final byte[] FAMILY = Bytes.toBytes("family");
  private static final byte[] QUALIFIER = Bytes.toBytes("qualifier");
  private static final byte[] VALUE = Bytes.toBytes("value");
  private static Table TABLE;

  @Rule
  public TestTableName TEST_TABLE = new TestTableName();

  @BeforeClass
  public static void setUp() throws Exception {
    // Set hbase.snapshot.thread.pool.max to 1.
    CONF.setInt("hbase.snapshot.thread.pool.max", 1);
    // Enable snapshots.
    CONF.setBoolean(SnapshotManager.HBASE_SNAPSHOT_ENABLED, true);
    // Start the mini cluster.
    TEST_UTIL.startMiniCluster(3);
    // Create the test table.
    createTable();
  }

  private static byte[] integerToBytes(int i) {
    return Bytes.toBytes(String.format("%06d", i));
  }

  private static void createTable() throws IOException {
    byte[][] splitKeys = new byte[MAX_SPLIT_KEYS_NUM][];
    for (int i = 0; i < splitKeys.length; i++) {
      splitKeys[i] = integerToBytes(i);
    }
    TABLE = TEST_UTIL.createTable(TABLE_NAME, FAMILY, splitKeys);
  }

  @AfterClass
  public static void tearDown() throws Exception {
    TEST_UTIL.shutdownMiniCluster();
  }

  private static void loadDataAndFlush() throws IOException {
    for (int i = 0; i < MAX_SPLIT_KEYS_NUM; i++) {
      Put put = new Put(integerToBytes(i)).addColumn(FAMILY, QUALIFIER,
        Bytes.add(VALUE, Bytes.toBytes(i)));
      TABLE.put(put);
    }
    TEST_UTIL.flush(TABLE_NAME);
  }

  private static List<Path> listHFileNames(final FileSystem fs, final Path tableDir)
      throws IOException {
    final List<Path> hfiles = new ArrayList<>();
    FSVisitor.visitTableStoreFiles(fs, tableDir, (region, family, hfileName) -> {
      hfiles.add(new Path(new Path(new Path(tableDir, region), family), hfileName));
    });
    Collections.sort(hfiles);
    return hfiles;
  }

  // Returns true if at least one snapshot (including an in-progress one under .tmp) exists.
  private static boolean isAnySnapshots(FileSystem fs) throws IOException {
    Path snapshotDir = SnapshotDescriptionUtils.getSnapshotsDir(FSUtils.getRootDir(CONF));
    FileStatus[] snapFiles = fs.listStatus(snapshotDir);
    if (snapFiles.length == 0) {
      return false;
    }
    Path firstPath = snapFiles[0].getPath();
    LOG.info("firstPath in isAnySnapshots: " + firstPath);
    if (snapFiles.length == 1 && firstPath.getName().equals(".tmp")) {
      FileStatus[] tmpSnapFiles = fs.listStatus(firstPath);
      return tmpSnapFiles != null && tmpSnapFiles.length > 0;
    }
    return true;
  }

  @Test
  public void testSnapshotWhenSnapshotHFileCleanerRunning() throws Exception {
    // Load data and flush to generate a large number of HFiles.
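    // Test outline: one thread keeps taking snapshots of the table while another repeatedly
    // refreshes the SnapshotHFileCleaner's file cache and asks it for deletable files; the
    // cleaner must never report the table's HFiles as deletable while a snapshot is in progress.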
    loadDataAndFlush();

    SnapshotHFileCleaner cleaner = new SnapshotHFileCleaner();
    cleaner.init(ImmutableMap.of(HMaster.MASTER, TEST_UTIL.getHBaseCluster().getMaster()));
    cleaner.setConf(CONF);

    FileSystem fs = FSUtils.getCurrentFileSystem(CONF);
    List<Path> fileNames =
      listHFileNames(fs, FSUtils.getTableDir(FSUtils.getRootDir(CONF), TABLE_NAME));
    List<FileStatus> files = new ArrayList<>();
    for (Path fileName : fileNames) {
      files.add(fs.getFileStatus(fileName));
    }

    // Sanity check: with a completed snapshot in place nothing is deletable; after deleting it
    // and refreshing the cache, all HFiles become deletable again.
    TEST_UTIL.getAdmin().snapshot("snapshotName_prev", TABLE_NAME);
    Assert.assertEquals(0, Lists.newArrayList(cleaner.getDeletableFiles(files)).size());
    TEST_UTIL.getAdmin().deleteSnapshot("snapshotName_prev");
    cleaner.getFileCacheForTesting().triggerCacheRefreshForTesting();
    Assert.assertEquals(100, Lists.newArrayList(cleaner.getDeletableFiles(files)).size());

    Runnable snapshotRunnable = () -> {
      try {
        // This thread is kept busy taking snapshots.
        for (int k = 0; k < 5; k++) {
          TEST_UTIL.getAdmin().snapshot("snapshotName_" + k, TABLE_NAME);
        }
      } catch (Exception e) {
        LOG.error("Snapshot failed: ", e);
      }
    };
    final AtomicBoolean success = new AtomicBoolean(true);
    Runnable cleanerRunnable = () -> {
      try {
        while (!isAnySnapshots(fs)) {
          LOG.info("No snapshot found yet, sleeping 100ms");
          Thread.sleep(100);
        }
        for (int k = 0; k < 5; k++) {
          cleaner.getFileCacheForTesting().triggerCacheRefreshForTesting();
          Iterable<FileStatus> toDeleteFiles = cleaner.getDeletableFiles(files);
          List<FileStatus> deletableFiles = Lists.newArrayList(toDeleteFiles);
          LOG.info("Size of deletableFiles is: " + deletableFiles.size());
          for (int i = 0; i < deletableFiles.size(); i++) {
            LOG.debug("toDeleteFiles[{}] is: {}", i, deletableFiles.get(i));
          }
          if (deletableFiles.size() > 0) {
            success.set(false);
          }
        }
      } catch (Exception e) {
        LOG.error("Chore cleaning failed: ", e);
      }
    };
    Thread t1 = new Thread(snapshotRunnable);
    t1.start();
    Thread t2 = new Thread(cleanerRunnable);
    t2.start();
    t1.join();
    t2.join();
    Assert.assertTrue(success.get());
  }
}