/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.mob;

import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;

import java.io.IOException;
import java.util.Arrays;
import java.util.List;
import java.util.stream.Collectors;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseClassTestRule;
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.CompactionState;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.RegionInfo;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner;
import org.apache.hadoop.hbase.testclassification.MediumTests;
import org.apache.hadoop.hbase.util.Bytes;
import org.junit.After;
import org.junit.Before;
import org.junit.ClassRule;
import org.junit.Test;
import org.junit.experimental.categories.Category;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

/**
 * Mob file cleaner chore test.
 * <ol>
 * <li>Creates a MOB table</li>
 * <li>Loads MOB data and flushes it N times</li>
 * <li>Runs major MOB compaction</li>
 * <li>Verifies that the number of MOB files in the mob directory is N + 1</li>
 * <li>Waits for a period of time larger than the minimum age to archive</li>
 * <li>Runs the MOB file cleaner chore</li>
 * <li>Verifies that every old MOB file referenced from the current RS was archived</li>
 * </ol>
 */
@Category(MediumTests.class)
public class TestRSMobFileCleanerChore {
  private static final Logger LOG = LoggerFactory.getLogger(TestRSMobFileCleanerChore.class);
  @ClassRule
  public static final HBaseClassTestRule CLASS_RULE =
    HBaseClassTestRule.forClass(TestRSMobFileCleanerChore.class);

  private HBaseTestingUtility HTU;

  private final static String famStr = "f1";
  private final static byte[] fam = Bytes.toBytes(famStr);
  private final static byte[] qualifier = Bytes.toBytes("q1");
  private final static long mobLen = 10;
  private final static byte[] mobVal = Bytes
    .toBytes("01234567890123456789012345678901234567890123456789012345678901234567890123456789");

  private Configuration conf;
  private TableDescriptorBuilder.ModifyableTableDescriptor tableDescriptor;
  private ColumnFamilyDescriptor familyDescriptor;
  private Admin admin;
  private Table table = null;
  private RSMobFileCleanerChore chore;
  private long minAgeToArchive = 10000;

  public TestRSMobFileCleanerChore() {
  }

  @Before
  public void setUp() throws Exception {
    HTU = new HBaseTestingUtility();
    conf = HTU.getConfiguration();

    initConf();

    HTU.startMiniCluster();
    admin = HTU.getAdmin();
    familyDescriptor = ColumnFamilyDescriptorBuilder.newBuilder(fam).setMobEnabled(true)
      .setMobThreshold(mobLen).setMaxVersions(1).build();
    tableDescriptor =
      HTU.createModifyableTableDescriptor("testMobCompactTable").setColumnFamily(familyDescriptor);
    table = HTU.createTable(tableDescriptor, Bytes.toByteArrays("1"));
  }

  private void initConf() {
    conf.setInt("hfile.format.version", 3);
    conf.setLong(TimeToLiveHFileCleaner.TTL_CONF_KEY, 0);
    conf.setInt("hbase.client.retries.number", 100);
    conf.setInt("hbase.hregion.max.filesize", 200000000);
    conf.setInt("hbase.hregion.memstore.flush.size", 800000);
    conf.setInt("hbase.hstore.blockingStoreFiles", 150);
    conf.setInt("hbase.hstore.compaction.throughput.lower.bound", 52428800);
    conf.setInt("hbase.hstore.compaction.throughput.higher.bound", 2 * 52428800);
    // conf.set(MobStoreEngine.DEFAULT_MOB_COMPACTOR_CLASS_KEY,
    // FaultyMobStoreCompactor.class.getName());
    // Disable automatic MOB compaction
    conf.setLong(MobConstants.MOB_COMPACTION_CHORE_PERIOD, 0);
    // Disable automatic MOB file cleaner chore
    conf.setLong(MobConstants.MOB_CLEANER_PERIOD, 0);
    // Set minimum age to archive to 10 sec
    conf.setLong(MobConstants.MIN_AGE_TO_ARCHIVE_KEY, minAgeToArchive);
    // Set compacted file discharger interval to half of minAgeToArchive
    conf.setLong("hbase.hfile.compaction.discharger.interval", minAgeToArchive / 2);
  }

  private void loadData(int start, int num) {
    try {
      for (int i = 0; i < num; i++) {
        Put p = new Put(Bytes.toBytes(start + i));
        p.addColumn(fam, qualifier, mobVal);
        table.put(p);
      }
      admin.flush(table.getName());
    } catch (Exception e) {
      LOG.error("MOB file cleaner chore test FAILED", e);
      assertTrue(false);
    }
  }

  @After
  public void tearDown() throws Exception {
    admin.disableTable(tableDescriptor.getTableName());
    admin.deleteTable(tableDescriptor.getTableName());
    HTU.shutdownMiniCluster();
  }

  @Test
  public void testMobFileCleanerChore() throws InterruptedException, IOException {
    loadData(0, 10);
    loadData(10, 10);
    // loadData(20, 10);
    long num = getNumberOfMobFiles(conf, table.getName(), new String(fam));
    assertEquals(2, num);
    // Major compact
    admin.majorCompact(tableDescriptor.getTableName(), fam);
    // wait until compaction is complete
    while (admin.getCompactionState(tableDescriptor.getTableName()) != CompactionState.NONE) {
      Thread.sleep(100);
    }

    num = getNumberOfMobFiles(conf, table.getName(), new String(fam));
    assertEquals(3, num);
    // We have a guarantee that the compacted file discharger will run during this pause
    // because its interval is less than this wait time
    LOG.info("Waiting for {}ms", minAgeToArchive + 1000);

    Thread.sleep(minAgeToArchive + 1000);
    LOG.info("Cleaning up MOB files");

    ServerName serverUsed = null;
    List<RegionInfo> serverRegions = null;
    for (ServerName sn : admin.getRegionServers()) {
      serverRegions = admin.getRegions(sn);
      if (serverRegions != null && serverRegions.size() > 0) {
        // filter out regions that do not belong to the test table
        serverRegions = serverRegions.stream().filter(r -> r.getTable().equals(table.getName()))
          .collect(Collectors.toList());
        // if such a region is found, use this region server
        if (serverRegions.size() > 0) {
          serverUsed = sn;
        }
        break;
      }
    }

    chore = HTU.getMiniHBaseCluster().getRegionServer(serverUsed).getRSMobFileCleanerChore();

    chore.chore();

    num = getNumberOfMobFiles(conf, table.getName(), new String(fam));
    assertEquals(3 - serverRegions.size(), num);

    long scanned = scanTable();
    assertEquals(20, scanned);

    // create a MOB file not referenced from the current RS
    Path extraMOBFile = MobTestUtil.generateMOBFileForRegion(conf, table.getName(),
      familyDescriptor, "nonExistentRegion");

    // verify the new MOB file has been added
    num = getNumberOfMobFiles(conf, table.getName(), new String(fam));
    assertEquals(4 - serverRegions.size(), num);

    FileSystem fs = FileSystem.get(conf);
    assertTrue(fs.exists(extraMOBFile));

    LOG.info("Waiting for {}ms", minAgeToArchive + 1000);

    Thread.sleep(minAgeToArchive + 1000);
    LOG.info("Cleaning up MOB files");

    // run the chore again
    chore.chore();

    // the chore should only archive old MOB files that were referenced from the current RS;
    // the unrelated MOB file is still there
    num = getNumberOfMobFiles(conf, table.getName(), new String(fam));
    assertEquals(4 - serverRegions.size(), num);

    assertTrue(fs.exists(extraMOBFile));

    scanned = scanTable();
    assertEquals(20, scanned);
  }

  private long getNumberOfMobFiles(Configuration conf, TableName tableName, String family)
    throws IOException {
    FileSystem fs = FileSystem.get(conf);
    Path dir = MobUtils.getMobFamilyPath(conf, tableName, family);
    FileStatus[] stat = fs.listStatus(dir);
    for (FileStatus st : stat) {
      LOG.debug("MOB directory content: {} size={}", st.getPath(), st.getLen());
    }
    LOG.debug("MOB directory content total files: {}", stat.length);

    return stat.length;
  }

  private long scanTable() {
    try {
      Result result;
      ResultScanner scanner = table.getScanner(fam);
      long counter = 0;
      while ((result = scanner.next()) != null) {
        assertTrue(Arrays.equals(result.getValue(fam, qualifier), mobVal));
        counter++;
      }
      return counter;
    } catch (Exception e) {
      LOG.error("MOB file cleaner chore test FAILED", e);
      if (HTU != null) {
        assertTrue(false);
      } else {
        System.exit(-1);
      }
    }
    return 0;
  }
}