/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.wal;

import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertTrue;

import java.util.ArrayList;
import java.util.List;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.CellUtil;
import org.apache.hadoop.hbase.HBaseClassTestRule;
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.MiniHBaseCluster;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.testclassification.LargeTests;
import org.apache.hadoop.hbase.testclassification.RegionServerTests;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.CommonFSUtils;
import org.junit.AfterClass;
import org.junit.BeforeClass;
import org.junit.ClassRule;
import org.junit.Test;
import org.junit.experimental.categories.Category;

/**
 * Verifies that WAL splitting tolerates edits belonging to a table that was dropped before the
 * split happened: the dropped table's WAL directory must not be re-created by the split, while
 * edits belonging to still-live tables written by the killed region server must be replayed and
 * remain readable.
 */
@Category({ RegionServerTests.class, LargeTests.class })
public class TestWALSplitWithDeletedTableData {

  @ClassRule
  public static final HBaseClassTestRule CLASS_RULE =
    HBaseClassTestRule.forClass(TestWALSplitWithDeletedTableData.class);

  private static final HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();

  @BeforeClass
  public static void setup() throws Exception {
    // Two region servers: one will be killed, the surviving one replays its WALs.
    TEST_UTIL.startMiniCluster(2);
  }

  @AfterClass
  public static void tearDown() throws Exception {
    TEST_UTIL.shutdownMiniCluster();
  }

  @Test
  public void testWALSplitWithDeletedTableData() throws Exception {
    final byte[] CFNAME = Bytes.toBytes("f1");
    final byte[] QNAME = Bytes.toBytes("q1");
    final byte[] VALUE = Bytes.toBytes("v1");
    final TableName t1 = TableName.valueOf("t1");
    final TableName t2 = TableName.valueOf("t2");
    // Pre-split both tables the same way so WAL edits for each span several regions.
    final byte[][] splitRows =
      { Bytes.toBytes("a"), Bytes.toBytes("b"), Bytes.toBytes("c"), Bytes.toBytes("d") };
    TableDescriptorBuilder htdBuilder1 = TableDescriptorBuilder.newBuilder(t1);
    htdBuilder1.setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(CFNAME).build());
    Table tab1 = TEST_UTIL.createTable(htdBuilder1.build(), splitRows);
    TableDescriptorBuilder htdBuilder2 = TableDescriptorBuilder.newBuilder(t2);
    htdBuilder2.setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(CFNAME).build());
    // Table is AutoCloseable; close handles when done instead of leaking them.
    try (Table tab2 = TEST_UTIL.createTable(htdBuilder2.build(), splitRows)) {
      List<Put> puts = new ArrayList<>(4);
      // One row per region so every region of both tables carries a WAL edit.
      byte[][] rks =
        { Bytes.toBytes("ac"), Bytes.toBytes("ba"), Bytes.toBytes("ca"), Bytes.toBytes("dd") };
      for (byte[] rk : rks) {
        puts.add(new Put(rk).addColumn(CFNAME, QNAME, VALUE));
      }
      tab1.put(puts);
      tab2.put(puts);
      // t1's handle is no longer needed; release it before the table is dropped.
      tab1.close();
      MiniHBaseCluster cluster = TEST_UTIL.getMiniHBaseCluster();
      TEST_UTIL.deleteTable(t1);
      Path tableDir = CommonFSUtils.getWALTableDir(TEST_UTIL.getConfiguration(), t1);
      // Dropping table 't1' removed the table directory from the WAL FS completely
      assertFalse(TEST_UTIL.getDFSCluster().getFileSystem().exists(tableDir));
      ServerName rs1 = cluster.getRegionServer(1).getServerName();
      // Kill one RS and wait for the WAL split and replay be over.
      cluster.killRegionServer(rs1);
      cluster.waitForRegionServerToStop(rs1, 60 * 1000);
      assertEquals(1, cluster.getNumLiveRegionServers());
      // Brief pause so region reassignment has started before we poll for RIT to drain.
      Thread.sleep(1 * 1000);
      TEST_UTIL.waitUntilNoRegionsInTransition(60 * 1000);
      // Table 't1' is dropped. Assert table directory does not exist in WAL FS after WAL split.
      assertFalse(TEST_UTIL.getDFSCluster().getFileSystem().exists(tableDir));
      // Assert the table t2 region's data getting replayed after WAL split and available
      for (byte[] rk : rks) {
        Result result = tab2.get(new Get(rk));
        assertFalse(result.isEmpty());
        Cell cell = result.getColumnLatestCell(CFNAME, QNAME);
        assertNotNull(cell);
        assertTrue(CellUtil.matchingValue(cell, VALUE));
      }
    }
  }
}