/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.master;

import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertTrue;

import java.io.IOException;
import java.time.Duration;
import java.util.List;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.HBaseClassTestRule;
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.MiniHBaseCluster;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.RegionInfo;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.master.region.MasterRegionFactory;
import org.apache.hadoop.hbase.procedure2.store.wal.WALProcedureStore;
import org.apache.hadoop.hbase.regionserver.HRegionServer;
import org.apache.hadoop.hbase.testclassification.LargeTests;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.CommonFSUtils;
import org.apache.hadoop.hbase.zookeeper.ZKUtil;
import org.junit.Before;
import org.junit.ClassRule;
import org.junit.Rule;
import org.junit.Test;
import org.junit.experimental.categories.Category;
import org.junit.rules.TestName;

/**
 * Test that store files under the data directory are reused when the cluster fails over to a new
 * set of region servers with different hostnames, with or without the old WALs and ZooKeeper
 * ZNodes. For the combinations that are not supported, the master or the cluster startup is
 * expected to fail.
 */
@Category({ LargeTests.class })
public class TestRecreateCluster {
  @ClassRule
  public static final HBaseClassTestRule CLASS_RULE =
    HBaseClassTestRule.forClass(TestRecreateCluster.class);

  @Rule
  public TestName name = new TestName();

  private static final HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
  private static final int NUM_RS = 3;
  private static final long TIMEOUT_MS = Duration.ofMinutes(1).toMillis();
  private static final long MASTER_INIT_TIMEOUT_MS = Duration.ofSeconds(45).toMillis();

  @Before
  public void setup() {
    TEST_UTIL.getConfiguration().setLong("hbase.master.init.timeout.localHBaseCluster",
      MASTER_INIT_TIMEOUT_MS);
  }

  @Test
  public void testRecreateCluster_UserTableDisabled_ReuseWALsAndZNodes() throws Exception {
    validateRecreateClusterWithUserDisabled(false, false);
  }

  @Test
  public void testRecreateCluster_UserTableEnabled_ReuseWALsAndZNodes() throws Exception {
    validateRecreateClusterWithUserTableEnabled(false, false);
  }

  @Test
  public void testRecreateCluster_UserTableEnabled_CleanupZNodes() throws Exception {
    // a new InitMetaProcedure is not submitted and the existing SUCCESS InitMetaProcedure is
    // reused; initMetaProc.await() hangs forever.
    validateRecreateClusterWithUserTableEnabled(false, true);
  }

  @Test(expected = IOException.class)
  public void testRecreateCluster_UserTableEnabled_CleanupWALAndZNodes() throws Exception {
    // the master fails in InitMetaProcedure because it cannot delete the existing meta table
    // directory; region servers cannot join and the cluster start times out.
    validateRecreateClusterWithUserTableEnabled(true, true);
  }

  private void validateRecreateClusterWithUserDisabled(boolean cleanupWALs, boolean cleanUpZNodes)
    throws Exception {
    TEST_UTIL.startMiniCluster(NUM_RS);
    try {
      TableName tableName = TableName.valueOf("t1");
      prepareDataBeforeRecreate(TEST_UTIL, tableName);
      TEST_UTIL.getAdmin().disableTable(tableName);
      TEST_UTIL.waitTableDisabled(tableName.getName());
      restartHBaseCluster(cleanupWALs, cleanUpZNodes);
      TEST_UTIL.getAdmin().enableTable(tableName);
      validateDataAfterRecreate(TEST_UTIL, tableName);
    } finally {
      TEST_UTIL.shutdownMiniCluster();
    }
  }

  private void validateRecreateClusterWithUserTableEnabled(boolean cleanupWALs,
    boolean cleanUpZNodes) throws Exception {
    TEST_UTIL.startMiniCluster(NUM_RS);
    try {
      TableName tableName = TableName.valueOf("t1");
      prepareDataBeforeRecreate(TEST_UTIL, tableName);
      restartHBaseCluster(cleanupWALs, cleanUpZNodes);
      validateDataAfterRecreate(TEST_UTIL, tableName);
    } finally {
      TEST_UTIL.shutdownMiniCluster();
    }
  }

  private void restartHBaseCluster(boolean cleanUpWALs, boolean cleanUpZnodes) throws Exception {
    // flush the caches so that everything is on disk
    TEST_UTIL.getMiniHBaseCluster().flushcache(TableName.META_TABLE_NAME);
    TEST_UTIL.getMiniHBaseCluster().flushcache();

    List<ServerName> oldServers =
      TEST_UTIL.getHBaseCluster().getMaster().getServerManager().getOnlineServersList();

    // make sure there are no pending procedures
    TEST_UTIL.waitFor(TIMEOUT_MS, () -> TEST_UTIL.getHBaseCluster().getMaster().getProcedures()
      .stream().allMatch(p -> p.isFinished()));

    // shut down and delete data if needed
    Path walRootDirPath = TEST_UTIL.getMiniHBaseCluster().getMaster().getWALRootDir();
    Path rootDirPath = CommonFSUtils.getRootDir(TEST_UTIL.getConfiguration());
    TEST_UTIL.shutdownMiniHBaseCluster();

    if (cleanUpWALs) {
      TEST_UTIL.getDFSCluster().getFileSystem()
        .delete(new Path(rootDirPath, MasterRegionFactory.MASTER_STORE_DIR), true);
      TEST_UTIL.getDFSCluster().getFileSystem()
        .delete(new Path(walRootDirPath, MasterRegionFactory.MASTER_STORE_DIR), true);
      TEST_UTIL.getDFSCluster().getFileSystem()
        .delete(new Path(walRootDirPath, WALProcedureStore.MASTER_PROCEDURE_LOGDIR), true);

      TEST_UTIL.getDFSCluster().getFileSystem()
        .delete(new Path(walRootDirPath, HConstants.HREGION_LOGDIR_NAME), true);
      TEST_UTIL.getDFSCluster().getFileSystem()
        .delete(new Path(walRootDirPath, HConstants.HREGION_OLDLOGDIR_NAME), true);
    }

    if (cleanUpZnodes) {
      // delete all ZK data; we cannot keep the ZK data because it would keep the meta region
      // state as open while no InitMetaProcedure would be submitted
      ZKUtil.deleteChildrenRecursively(TEST_UTIL.getZooKeeperWatcher(),
        TEST_UTIL.getZooKeeperWatcher().getZNodePaths().baseZNode);
      TEST_UTIL.shutdownMiniZKCluster();
      TEST_UTIL.startMiniZKCluster();
    }

    TEST_UTIL.restartHBaseCluster(NUM_RS);
    TEST_UTIL.waitFor(TIMEOUT_MS,
      () -> TEST_UTIL.getMiniHBaseCluster().getNumLiveRegionServers() == NUM_RS);

    // make sure we have a new set of region servers with different hostnames and ports
    List<ServerName> newServers =
      TEST_UTIL.getHBaseCluster().getMaster().getServerManager().getOnlineServersList();
    assertFalse(newServers.stream().anyMatch(oldServers::contains));
  }

  private void prepareDataBeforeRecreate(HBaseTestingUtility testUtil, TableName tableName)
    throws Exception {
    Table table = testUtil.createTable(tableName, "f");
    Put put = new Put(Bytes.toBytes("r1"));
    put.addColumn(Bytes.toBytes("f"), Bytes.toBytes("c"), Bytes.toBytes("v"));
    table.put(put);

    ensureTableNotColocatedWithSystemTable(tableName, TableName.NAMESPACE_TABLE_NAME);
  }

  private void ensureTableNotColocatedWithSystemTable(TableName userTable, TableName systemTable)
    throws IOException, InterruptedException {
    MiniHBaseCluster hbaseCluster = TEST_UTIL.getHBaseCluster();
    assertTrue("Please start more than 1 regionserver",
      hbaseCluster.getRegionServerThreads().size() > 1);

    int userTableServerNum = getServerNumForTableWithOnlyOneRegion(userTable);
    int systemTableServerNum = getServerNumForTableWithOnlyOneRegion(systemTable);

    if (userTableServerNum != systemTableServerNum) {
      // no-op if the user table and the system table are already on different region servers
      return;
    }

    int destServerNum = (systemTableServerNum + 1) % NUM_RS;
    assertTrue(systemTableServerNum != destServerNum);

    HRegionServer systemTableServer = hbaseCluster.getRegionServer(systemTableServerNum);
    HRegionServer destServer = hbaseCluster.getRegionServer(destServerNum);
    assertFalse(systemTableServer.equals(destServer));
    // make sure the dest server is live before moving the region
    hbaseCluster.waitForRegionServerToStart(destServer.getServerName().getHostname(),
      destServer.getServerName().getPort(), TIMEOUT_MS);
    // move the userTable region to a region server not co-located with the system table
    TEST_UTIL.moveRegionAndWait(TEST_UTIL.getAdmin().getRegions(userTable).get(0),
      destServer.getServerName());
  }

  private int getServerNumForTableWithOnlyOneRegion(TableName tableName) throws IOException {
    List<RegionInfo> tableRegionInfos = TEST_UTIL.getAdmin().getRegions(tableName);
    assertEquals(1, tableRegionInfos.size());
    return TEST_UTIL.getHBaseCluster().getServerWith(tableRegionInfos.get(0).getRegionName());
  }

  private void validateDataAfterRecreate(HBaseTestingUtility testUtil, TableName tableName)
    throws Exception {
    Table t1 = testUtil.getConnection().getTable(tableName);
    Get get = new Get(Bytes.toBytes("r1"));
    get.addColumn(Bytes.toBytes("f"), Bytes.toBytes("c"));
    Result result = t1.get(get);
    assertTrue(result.advance());
    Cell cell = result.current();
    assertEquals("v",
      Bytes.toString(cell.getValueArray(), cell.getValueOffset(), cell.getValueLength()));
    assertFalse(result.advance());
  }

}