/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.client;

import static org.hamcrest.CoreMatchers.instanceOf;
import static org.hamcrest.MatcherAssert.assertThat;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertNotEquals;
import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertNotSame;
import static org.junit.Assert.fail;

import java.io.IOException;
import java.util.List;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;
import java.util.stream.IntStream;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.Abortable;
import org.apache.hadoop.hbase.HBaseClassTestRule;
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HRegionLocation;
import org.apache.hadoop.hbase.RegionLocations;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.master.RegionState;
import org.apache.hadoop.hbase.testclassification.ClientTests;
import org.apache.hadoop.hbase.testclassification.MediumTests;
import org.apache.hadoop.hbase.zookeeper.MiniZooKeeperCluster;
import org.apache.hadoop.hbase.zookeeper.ReadOnlyZKClient;
import org.apache.hadoop.hbase.zookeeper.ZKUtil;
import org.apache.hadoop.hbase.zookeeper.ZKWatcher;
import org.apache.zookeeper.KeeperException;
import org.junit.AfterClass;
import org.junit.BeforeClass;
import org.junit.ClassRule;
import org.junit.Rule;
import org.junit.Test;
import org.junit.experimental.categories.Category;
import org.junit.rules.TestName;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import org.apache.hbase.thirdparty.com.google.common.io.Closeables;

import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
import org.apache.hadoop.hbase.shaded.protobuf.generated.ZooKeeperProtos;

@Category({ MediumTests.class, ClientTests.class })
public class TestZKConnectionRegistry {

  @ClassRule
  public static final HBaseClassTestRule CLASS_RULE =
    HBaseClassTestRule.forClass(TestZKConnectionRegistry.class);

  @Rule
  public final TestName name = new TestName();

  static final Logger LOG = LoggerFactory.getLogger(TestZKConnectionRegistry.class);
  static final HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();

  private static ZKConnectionRegistry REGISTRY;

  @BeforeClass
  public static void setUp() throws Exception {
    // Three region servers and three hbase:meta replicas so the registry has multiple
    // locations to report.
    TEST_UTIL.startMiniCluster(3);
    HBaseTestingUtility.setReplicas(TEST_UTIL.getAdmin(), TableName.META_TABLE_NAME, 3);
    REGISTRY = new ZKConnectionRegistry(TEST_UTIL.getConfiguration());
  }

  @AfterClass
  public static void tearDown() throws Exception {
    Closeables.close(REGISTRY, true);
    TEST_UTIL.shutdownMiniCluster();
  }

  @Test
  public void test() throws InterruptedException, ExecutionException, IOException {
    LOG.info("STARTED TEST");
    // The registry should report the same cluster id and active master as the mini cluster.
    String clusterId = REGISTRY.getClusterId().get();
    String expectedClusterId = TEST_UTIL.getHBaseCluster().getMaster().getClusterId();
    assertEquals("Expected " + expectedClusterId + ", found=" + clusterId, expectedClusterId,
      clusterId);
    assertEquals(TEST_UTIL.getHBaseCluster().getMaster().getServerName(),
      REGISTRY.getActiveMaster().get());
    // All three meta replicas should be located, each carrying the expected replica id.
    RegionReplicaTestHelper.waitUntilAllMetaReplicasAreReady(TEST_UTIL, REGISTRY);
    RegionLocations locs = REGISTRY.getMetaRegionLocations().get();
    assertEquals(3, locs.getRegionLocations().length);
    IntStream.range(0, 3).forEach(i -> {
      HRegionLocation loc = locs.getRegionLocation(i);
      assertNotNull("Replica " + i + " doesn't have location", loc);
      assertEquals(TableName.META_TABLE_NAME, loc.getRegion().getTable());
      assertEquals(i, loc.getRegion().getReplicaId());
    });
  }

  @Test
  public void testIndependentZKConnections() throws IOException {
    try (ReadOnlyZKClient zk1 = REGISTRY.getZKClient()) {
      Configuration otherConf = new Configuration(TEST_UTIL.getConfiguration());
      otherConf.set(HConstants.ZOOKEEPER_QUORUM, MiniZooKeeperCluster.HOST);
      try (ZKConnectionRegistry otherRegistry = new ZKConnectionRegistry(otherConf)) {
        ReadOnlyZKClient zk2 = otherRegistry.getZKClient();
        assertNotSame("Using a different configuration / quorum should result in "
          + "different backing zk connection.", zk1, zk2);
        assertNotEquals(
          "Using a different configuration / quorum should be reflected in the zk connection.",
          zk1.getConnectString(), zk2.getConnectString());
      }
    } finally {
      LOG.info("DONE!");
    }
  }

  @Test
  public void testNoMetaAvailable() throws InterruptedException {
    Configuration conf = new Configuration(TEST_UTIL.getConfiguration());
    conf.set("zookeeper.znode.metaserver", "whatever");
    try (ZKConnectionRegistry registry = new ZKConnectionRegistry(conf)) {
      try {
        registry.getMetaRegionLocations().get();
        fail("Should have failed since we set an incorrect meta znode prefix");
      } catch (ExecutionException e) {
        assertThat(e.getCause(), instanceOf(IOException.class));
      }
    }
  }

  /**
   * Pass a discontinuous list of znodes to registry getMetaRegionLocations. Should work fine. It
   * used to throw ArrayIndexOutOfBoundsException. See HBASE-25280.
   */
  @Test
  public void testDiscontinuousLocations() throws ExecutionException, InterruptedException,
    IOException, KeeperException, TimeoutException {
    // Write discontinuous meta replica locations to a zk namespace particular to this test to
    // avoid polluting other tests.
    Configuration conf = new Configuration(TEST_UTIL.getConfiguration());
    conf.set(HConstants.ZOOKEEPER_ZNODE_PARENT, "/" + this.name.getMethodName());
    ZooKeeperProtos.MetaRegionServer pbrsr = ZooKeeperProtos.MetaRegionServer.newBuilder()
      .setServer(ProtobufUtil.toServerName(ServerName.valueOf("example.org,1,1")))
      .setRpcVersion(HConstants.RPC_CURRENT_VERSION).setState(RegionState.State.OPEN.convert())
      .build();
    byte[] data = ProtobufUtil.prependPBMagic(pbrsr.toByteArray());
    try (ZKWatcher zkw = new ZKWatcher(conf, this.name.getMethodName(), new Abortable() {
      @Override
      public void abort(String why, Throwable e) {
      }

      @Override
      public boolean isAborted() {
        return false;
      }
    })) {
      // Write default replica and then a replica for replicaId #3.
      ZKUtil.createSetData(zkw, zkw.getZNodePaths().getZNodeForReplica(0), data);
      ZKUtil.createSetData(zkw, zkw.getZNodePaths().getZNodeForReplica(3), data);
      List<String> znodes = zkw.getMetaReplicaNodes();
      assertEquals(2, znodes.size());
      try (ZKConnectionRegistry registry = new ZKConnectionRegistry(conf)) {
        CompletableFuture<RegionLocations> cf = registry.getMetaRegionLocations();
        RegionLocations locations = cf.get(60, TimeUnit.SECONDS);
        assertEquals(2, locations.numNonNullElements());
      }
    }
  }
}