001/* 002 * Licensed to the Apache Software Foundation (ASF) under one 003 * or more contributor license agreements. See the NOTICE file 004 * distributed with this work for additional information 005 * regarding copyright ownership. The ASF licenses this file 006 * to you under the Apache License, Version 2.0 (the 007 * "License"); you may not use this file except in compliance 008 * with the License. You may obtain a copy of the License at 009 * 010 * http://www.apache.org/licenses/LICENSE-2.0 011 * 012 * Unless required by applicable law or agreed to in writing, software 013 * distributed under the License is distributed on an "AS IS" BASIS, 014 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 015 * See the License for the specific language governing permissions and 016 * limitations under the License. 017 */ 018package org.apache.hadoop.hbase.master; 019 020import static org.junit.jupiter.api.Assertions.assertEquals; 021 022import java.io.IOException; 023import org.apache.hadoop.hbase.CatalogFamilyFormat; 024import org.apache.hadoop.hbase.HBaseTestingUtil; 025import org.apache.hadoop.hbase.HConstants; 026import org.apache.hadoop.hbase.TableName; 027import org.apache.hadoop.hbase.client.Durability; 028import org.apache.hadoop.hbase.client.Put; 029import org.apache.hadoop.hbase.client.RegionInfo; 030import org.apache.hadoop.hbase.client.RegionLocator; 031import org.apache.hadoop.hbase.client.Result; 032import org.apache.hadoop.hbase.client.ResultScanner; 033import org.apache.hadoop.hbase.client.Scan; 034import org.apache.hadoop.hbase.client.Table; 035import org.apache.hadoop.hbase.testclassification.LargeTests; 036import org.apache.hadoop.hbase.testclassification.MasterTests; 037import org.apache.hadoop.hbase.util.Bytes; 038import org.junit.jupiter.api.AfterAll; 039import org.junit.jupiter.api.BeforeAll; 040import org.junit.jupiter.api.BeforeEach; 041import org.junit.jupiter.api.Disabled; 042import org.junit.jupiter.api.Tag; 043import 
org.junit.jupiter.api.Test;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

/**
 * Test transitions of state across the master. Sets up the cluster once and then runs a couple of
 * tests.
 */
@Tag(MasterTests.TAG)
@Tag(LargeTests.TAG)
public class TestMasterTransitions {

  private static final Logger LOG = LoggerFactory.getLogger(TestMasterTransitions.class);
  private static final HBaseTestingUtil TEST_UTIL = new HBaseTestingUtil();
  private static final TableName TABLENAME = TableName.valueOf("master_transitions");
  private static final byte[][] FAMILIES =
    new byte[][] { Bytes.toBytes("a"), Bytes.toBytes("b"), Bytes.toBytes("c") };

  /**
   * Start up a mini cluster and put a small table of many empty regions into it.
   * <p>
   * Counts the regions via the table's start keys, waits for full assignment, then seeds one row
   * per region through {@link #addToEachStartKey(int)}.
   */
  @BeforeAll
  public static void beforeAllTests() throws Exception {
    TEST_UTIL.startMiniCluster(2);
    // Create a table of three families. This will assign a region.
    TEST_UTIL.createMultiRegionTable(TABLENAME, FAMILIES);
    // Number of regions == number of start keys.
    int countOfRegions;
    try (RegionLocator r = TEST_UTIL.getConnection().getRegionLocator(TABLENAME)) {
      countOfRegions = r.getStartKeys().length;
    }
    TEST_UTIL.waitUntilAllRegionsAssigned(TABLENAME);
    // NOTE(review): the original also opened (and immediately closed) an unused Table handle
    // here; addToEachStartKey manages its own Table, so that acquisition was removed.
    addToEachStartKey(countOfRegions);
  }

  /** Tear down the mini cluster started in {@link #beforeAllTests()}. */
  @AfterAll
  public static void afterAllTests() throws Exception {
    TEST_UTIL.shutdownMiniCluster();
  }

  /** Each test requires at least two live regionservers. */
  @BeforeEach
  public void setup() throws IOException {
    TEST_UTIL.ensureSomeRegionServersAvailable(2);
  }

  /**
   * Listener for regionserver events testing hbase-2428 (Infinite loop of region closes if
   * hbase:meta region is offline). In particular, listen for the close of the 'metaServer' and when
   * it comes in, requeue it with a delay as though there were an issue processing the shutdown. As
   * part of the requeuing, send over a close of a region on 'otherServer' so it comes into a master
   * that has its meta region marked as offline.
   */
  /*
   * static class HBase2428Listener implements RegionServerOperationListener { // Map of what we've
   * delayed so we don't do do repeated delays. private final Set<RegionServerOperation> postponed =
   * new CopyOnWriteArraySet<RegionServerOperation>(); private boolean done = false;; private
   * boolean metaShutdownReceived = false; private final HServerAddress metaAddress; private final
   * MiniHBaseCluster cluster; private final int otherServerIndex; private final RegionInfo hri;
   * private int closeCount = 0; static final int SERVER_DURATION = 3 * 1000; static final int
   * CLOSE_DURATION = 1 * 1000; HBase2428Listener(final MiniHBaseCluster c, final HServerAddress
   * metaAddress, final RegionInfo closingHRI, final int otherServerIndex) { this.cluster = c;
   * this.metaAddress = metaAddress; this.hri = closingHRI; this.otherServerIndex =
   * otherServerIndex; }
   * @Override public boolean process(final RegionServerOperation op) throws IOException { // If a
   * regionserver shutdown and its of the meta server, then we want to // delay the processing of
   * the shutdown and send off a close of a region on // the 'otherServer. boolean result = true; if
   * (op instanceof ProcessServerShutdown) { ProcessServerShutdown pss = (ProcessServerShutdown)op;
   * if (pss.getDeadServerAddress().equals(this.metaAddress)) { // Don't postpone more than once. if
   * (!this.postponed.contains(pss)) { // Close some region.
   * this.cluster.addMessageToSendRegionServer(this.otherServerIndex, new
   * HMsg(HMsg.Type.MSG_REGION_CLOSE, hri, Bytes.toBytes("Forcing close in test")));
   * this.postponed.add(pss); // Put off the processing of the regionserver shutdown processing.
   * pss.setDelay(SERVER_DURATION); this.metaShutdownReceived = true; // Return false. This will add
   * this op to the delayed queue. result = false; } } } else { // Have the close run frequently. if
   * (isWantedCloseOperation(op) != null) { op.setDelay(CLOSE_DURATION); // Count how many times it
   * comes through here. this.closeCount++; } } return result; } public void processed(final
   * RegionServerOperation op) { if (isWantedCloseOperation(op) != null) return; this.done = true; }
   */
  /*
   * @return Null if not the wanted ProcessRegionClose, else <code>op</code> cast as a
   * ProcessRegionClose.
   */
  /*
   * private ProcessRegionClose isWantedCloseOperation(final RegionServerOperation op) { // Count
   * every time we get a close operation. if (op instanceof ProcessRegionClose) { ProcessRegionClose
   * c = (ProcessRegionClose)op; if (c.regionInfo.equals(hri)) { return c; } } return null; }
   * boolean isDone() { return this.done; } boolean isMetaShutdownReceived() { return
   * metaShutdownReceived; } int getCloseCount() { return this.closeCount; }
   * @Override public boolean process(HServerInfo serverInfo, HMsg incomingMsg) { return true; } }
   */
  /**
   * In 2428, the meta region has just been set offline and then a close comes in.
   * @see <a href="https://issues.apache.org/jira/browse/HBASE-2428">HBASE-2428</a>
   */
  @Disabled
  @Test
  public void testRegionCloseWhenNoMetaHBase2428() throws Exception {
    /*
     * LOG.info("Running testRegionCloseWhenNoMetaHBase2428"); MiniHBaseCluster cluster =
     * TEST_UTIL.getHBaseCluster(); final HMaster master = cluster.getMaster(); int metaIndex =
     * cluster.getServerWithMeta(); // Figure the index of the server that is not server the
     * hbase:meta int otherServerIndex = -1; for (int i = 0; i <
     * cluster.getRegionServerThreads().size(); i++) { if (i == metaIndex) continue;
     * otherServerIndex = i; break; } final HRegionServer otherServer =
     * cluster.getRegionServer(otherServerIndex); final HRegionServer metaHRS =
     * cluster.getRegionServer(metaIndex); // Get a region out on the otherServer. final RegionInfo
     * hri = otherServer.getOnlineRegions().iterator().next().getRegionInfo(); // Add our
     * RegionServerOperationsListener HBase2428Listener listener = new HBase2428Listener(cluster,
     * metaHRS.getHServerInfo().getServerAddress(), hri, otherServerIndex);
     * master.getRegionServerOperationQueue(). registerRegionServerOperationListener(listener); try
     * { // Now close the server carrying meta. cluster.abortRegionServer(metaIndex); // First wait
     * on receipt of meta server shutdown message. while(!listener.metaShutdownReceived)
     * Threads.sleep(100); while(!listener.isDone()) Threads.sleep(10); // We should not have
     * retried the close more times than it took for the // server shutdown message to exit the
     * delay queue and get processed // (Multiple by two to add in some slop in case of GC or
     * something). assertTrue(listener.getCloseCount() > 1); assertTrue(listener.getCloseCount() <
     * ((HBase2428Listener.SERVER_DURATION/HBase2428Listener.CLOSE_DURATION) * 2)); // Assert the
     * closed region came back online assertRegionIsBackOnline(hri); } finally {
     * master.getRegionServerOperationQueue(). unregisterRegionServerOperationListener(listener); }
     */
  }

  /**
   * Test adding in a new server before old one on same host+port is dead. Make the test more
   * onerous by having the server under test carry the meta. If confusion between old and new,
   * purportedly meta never comes back. Test that meta gets redeployed.
   */
  @Disabled
  @Test
  public void testAddingServerBeforeOldIsDead2413() throws IOException {
    /*
     * LOG.info("Running testAddingServerBeforeOldIsDead2413"); MiniHBaseCluster cluster =
     * TEST_UTIL.getHBaseCluster(); int count = count(); int metaIndex =
     * cluster.getServerWithMeta(); MiniHBaseClusterRegionServer metaHRS =
     * (MiniHBaseClusterRegionServer)cluster.getRegionServer(metaIndex); int port =
     * metaHRS.getServerInfo().getServerAddress().getPort(); Configuration c =
     * TEST_UTIL.getConfiguration(); String oldPort = c.get(HConstants.REGIONSERVER_PORT, "0"); try
     * { LOG.info("KILLED=" + metaHRS); metaHRS.kill(); c.set(HConstants.REGIONSERVER_PORT,
     * Integer.toString(port)); // Try and start new regionserver. It might clash with the old //
     * regionserver port so keep trying to get past the BindException. HRegionServer hrs = null;
     * while (true) { try { hrs = cluster.startRegionServer().getRegionServer(); break; } catch
     * (IOException e) { if (e.getCause() != null && e.getCause() instanceof
     * InvocationTargetException) { InvocationTargetException ee =
     * (InvocationTargetException)e.getCause(); if (ee.getCause() != null && ee.getCause()
     * instanceof BindException) { LOG.info("BindException; retrying: " + e.toString()); } } } }
     * LOG.info("STARTED=" + hrs); // Wait until he's been given at least 3 regions before we go on
     * to try // and count rows in table. while (hrs.getOnlineRegions().size() < 3)
     * Threads.sleep(100); LOG.info(hrs.toString() + " has " + hrs.getOnlineRegions().size() +
     * " regions"); assertEquals(count, count()); } finally { c.set(HConstants.REGIONSERVER_PORT,
     * oldPort); }
     */
  }

  /**
   * HBase2482 is about outstanding region openings. If any are outstanding when a regionserver goes
   * down, then they'll never deploy. They'll be stuck in the regions-in-transition list for ever.
   * This listener looks for a region opening HMsg and if its from the server passed on
   * construction, then we kill it. It also looks out for a close message on the victim server
   * because that signifies start of the fireworks.
   */
  /*
   * static class HBase2482Listener implements RegionServerOperationListener { private final
   * HRegionServer victim; private boolean abortSent = false; // We closed regions on new server.
   * private volatile boolean closed = false; // Copy of regions on new server private final
   * Collection<HRegion> copyOfOnlineRegions; // This is the region that was in transition on the
   * server we aborted. Test // passes if this region comes back online successfully. private
   * RegionInfo regionToFind; HBase2482Listener(final HRegionServer victim) { this.victim = victim;
   * // Copy regions currently open on this server so I can notice when // there is a close.
   * this.copyOfOnlineRegions = this.victim.getCopyOfOnlineRegionsSortedBySize().values(); }
   * @Override public boolean process(HServerInfo serverInfo, HMsg incomingMsg) { if
   * (!victim.getServerInfo().equals(serverInfo) || this.abortSent || !this.closed) { return true; }
   * if (!incomingMsg.isType(HMsg.Type.MSG_REPORT_PROCESS_OPEN)) return true; // Save the region
   * that is in transition so can test later it came back. this.regionToFind =
   * incomingMsg.getRegionInfo(); String msg = "ABORTING " + this.victim + " because got a " +
   * HMsg.Type.MSG_REPORT_PROCESS_OPEN + " on this server for " +
   * incomingMsg.getRegionInfo().getRegionNameAsString(); this.victim.abort(msg); this.abortSent =
   * true; return true; }
   * @Override public boolean process(RegionServerOperation op) throws IOException { return true; }
   * @Override public void processed(RegionServerOperation op) { if (this.closed || !(op instanceof
   * ProcessRegionClose)) return; ProcessRegionClose close = (ProcessRegionClose)op; for (HRegion r:
   * this.copyOfOnlineRegions) { if (r.getRegionInfo().equals(close.regionInfo)) { // We've closed
   * one of the regions that was on the victim server. // Now can start testing for when all regions
   * are back online again LOG.info("Found close of " + r.getRegionInfo().getRegionNameAsString() +
   * "; setting close happened flag"); this.closed = true; break; } } } }
   */
  /**
   * In 2482, a RS with an opening region on it dies. The said region is then stuck in the master's
   * regions-in-transition and never leaves it. This test works by bringing up a new regionserver,
   * waiting for the load balancer to give it some regions. Then, we close all on the new server.
   * After sending all the close messages, we send the new regionserver the special blocking message
   * so it can not process any more messages. Meantime reopening of the just-closed regions is
   * backed up on the new server. Soon as master gets an opening region from the new regionserver,
   * we kill it. We then wait on all regions to come back on line. If bug is fixed, this should
   * happen soon as the processing of the killed server is done.
   * @see <a href="https://issues.apache.org/jira/browse/HBASE-2482">HBASE-2482</a>
   */
  @Disabled
  @Test
  public void testKillRSWithOpeningRegion2482() throws Exception {
    /*
     * LOG.info("Running testKillRSWithOpeningRegion2482"); MiniHBaseCluster cluster =
     * TEST_UTIL.getHBaseCluster(); if (cluster.getLiveRegionServerThreads().size() < 2) { // Need
     * at least two servers. cluster.startRegionServer(); } // Count how many regions are online.
     * They need to be all back online for // this test to succeed. int countOfMetaRegions =
     * countOfMetaRegions(); // Add a listener on the server. HMaster m = cluster.getMaster(); //
     * Start new regionserver. MiniHBaseClusterRegionServer hrs =
     * (MiniHBaseClusterRegionServer)cluster.startRegionServer().getRegionServer();
     * LOG.info("Started new regionserver: " + hrs.toString()); // Wait until has some regions
     * before proceeding. Balancer will give it some. int minimumRegions =
     * countOfMetaRegions/(cluster.getRegionServerThreads().size() * 2); while
     * (hrs.getOnlineRegions().size() < minimumRegions) Threads.sleep(100); // Set the listener only
     * after some regions have been opened on new server. HBase2482Listener listener = new
     * HBase2482Listener(hrs); m.getRegionServerOperationQueue().
     * registerRegionServerOperationListener(listener); try { // Go close all non-catalog regions on
     * this new server closeAllNonCatalogRegions(cluster, hrs); // After all closes, add blocking
     * message before the region opens start to // come in.
     * cluster.addMessageToSendRegionServer(hrs, new HMsg(HMsg.Type.TESTING_BLOCK_REGIONSERVER)); //
     * Wait till one of the above close messages has an effect before we start // wait on all
     * regions back online. while (!listener.closed) Threads.sleep(100); LOG.info("Past close"); //
     * Make sure the abort server message was sent. while(!listener.abortSent) Threads.sleep(100);
     * LOG.info("Past abort send; waiting on all regions to redeploy"); // Now wait for regions to
     * come back online. assertRegionIsBackOnline(listener.regionToFind); } finally {
     * m.getRegionServerOperationQueue(). unregisterRegionServerOperationListener(listener); }
     */
  }

  /*
   * @return Count of all non-catalog regions on the designated server
   */
  /*
   * private int closeAllNonCatalogRegions(final MiniHBaseCluster cluster, final
   * MiniHBaseCluster.MiniHBaseClusterRegionServer hrs) throws IOException { int countOfRegions = 0;
   * for (HRegion r: hrs.getOnlineRegions()) { if (r.getRegionInfo().isMetaRegion()) continue;
   * cluster.addMessageToSendRegionServer(hrs, new HMsg(HMsg.Type.MSG_REGION_CLOSE,
   * r.getRegionInfo())); LOG.info("Sent close of " + r.getRegionInfo().getRegionNameAsString() +
   * " on " + hrs.toString()); countOfRegions++; } return countOfRegions; } private void
   * assertRegionIsBackOnline(final RegionInfo hri) throws IOException { // Region should have an
   * entry in its startkey because of addRowToEachRegion. byte [] row = getStartKey(hri); HTable t =
   * new HTable(TEST_UTIL.getConfiguration(), TABLENAME); Get g = new Get(row);
   * assertTrue((t.get(g)).size() > 0); } /*
   * @return Count of regions in meta table.
   */
  /*
   * private static int countOfMetaRegions() throws IOException { HTable meta = new
   * HTable(TEST_UTIL.getConfiguration(), HConstants.META_TABLE_NAME); int rows = 0; Scan scan = new
   * Scan(); scan.addColumn(HConstants.CATALOG_FAMILY, HConstants.SERVER_QUALIFIER); ResultScanner s
   * = meta.getScanner(scan); for (Result r = null; (r = s.next()) != null;) { byte [] b =
   * r.getValue(HConstants.CATALOG_FAMILY, HConstants.SERVER_QUALIFIER); if (b == null || b.length
   * <= 0) break; rows++; } s.close(); return rows; }
   */
  /*
   * Add to each of the regions in hbase:meta a value. Key is the startrow of the region (except its
   * 'aaa' for first region). Actual value is the row name.
   */
  private static int addToEachStartKey(final int expected) throws IOException {
    int rows = 0;
    Scan scan = new Scan();
    scan.addColumn(HConstants.CATALOG_FAMILY, HConstants.REGIONINFO_QUALIFIER);
    // try-with-resources so the table handles and scanner are released even if a put/next fails.
    try (Table t = TEST_UTIL.getConnection().getTable(TABLENAME);
      Table meta = TEST_UTIL.getConnection().getTable(TableName.META_TABLE_NAME);
      ResultScanner s = meta.getScanner(scan)) {
      for (Result r = null; (r = s.next()) != null;) {
        RegionInfo hri = CatalogFamilyFormat.getRegionInfo(r);
        if (hri == null) break;
        // Only seed regions belonging to our test table (the original repeated this
        // check twice back-to-back; the duplicate was removed).
        if (!hri.getTable().equals(TABLENAME)) {
          continue;
        }
        // If start key is empty, getStartKey substitutes 'aaa'.
        byte[] row = getStartKey(hri);
        Put p = new Put(row);
        p.setDurability(Durability.SKIP_WAL);
        p.addColumn(getTestFamily(), getTestQualifier(), row);
        t.put(p);
        rows++;
      }
    }
    assertEquals(expected, rows);
    return rows;
  }

  /*
   * @return Start key for hri (If start key is '', then return 'aaa'.
   */
  private static byte[] getStartKey(final RegionInfo hri) {
    return Bytes.equals(HConstants.EMPTY_START_ROW, hri.getStartKey())
      ? Bytes.toBytes("aaa")
      : hri.getStartKey();
  }

  /** Family used for the seeded test rows (first declared family). */
  private static byte[] getTestFamily() {
    return FAMILIES[0];
  }

  /** Qualifier used for the seeded test rows (same bytes as the test family). */
  private static byte[] getTestQualifier() {
    return getTestFamily();
  }
}