/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.regionserver.regionreplication;

import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertThrows;
import static org.junit.Assert.assertTrue;

import java.io.IOException;
import java.util.Collections;
import org.apache.hadoop.hbase.HBaseClassTestRule;
import org.apache.hadoop.hbase.HBaseTestingUtil;
import org.apache.hadoop.hbase.ReplicationPeerNotFoundException;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.SingleProcessHBaseCluster;
import org.apache.hadoop.hbase.master.HMaster;
import org.apache.hadoop.hbase.master.procedure.ServerCrashProcedure;
import org.apache.hadoop.hbase.master.replication.ReplicationPeerManager;
import org.apache.hadoop.hbase.procedure2.Procedure;
import org.apache.hadoop.hbase.replication.ReplicationGroupOffset;
import org.apache.hadoop.hbase.replication.ReplicationPeerConfig;
import org.apache.hadoop.hbase.replication.ReplicationQueueId;
import org.apache.hadoop.hbase.replication.ReplicationUtils;
import org.apache.hadoop.hbase.replication.regionserver.ReplicationSourceInterface;
import org.apache.hadoop.hbase.testclassification.MediumTests;
import org.apache.hadoop.hbase.testclassification.RegionServerTests;
import org.apache.hadoop.hbase.util.JVMClusterUtil.RegionServerThread;
import org.apache.hadoop.hbase.util.ServerRegionReplicaUtil;
import org.junit.AfterClass;
import org.junit.BeforeClass;
import org.junit.ClassRule;
import org.junit.Test;
import org.junit.experimental.categories.Category;

/**
 * Make sure we can still start the cluster when the legacy RegionReplicaReplicationEndpoint is
 * configured as a replication peer.
 */
@Category({ RegionServerTests.class, MediumTests.class })
public class TestStartupWithLegacyRegionReplicationEndpoint {

  @ClassRule
  public static final HBaseClassTestRule CLASS_RULE =
    HBaseClassTestRule.forClass(TestStartupWithLegacyRegionReplicationEndpoint.class);

  private static final HBaseTestingUtil UTIL = new HBaseTestingUtil();

  @BeforeClass
  public static void setUp() throws Exception {
    UTIL.startMiniCluster(1);
    // add and then remove a peer to force initialization of the replication storage
    UTIL.getAdmin().addReplicationPeer("1", ReplicationPeerConfig.newBuilder()
      .setClusterKey(UTIL.getZkCluster().getAddress().toString() + ":/1").build());
    UTIL.getAdmin().removeReplicationPeer("1");
  }

  @AfterClass
  public static void tearDown() throws IOException {
    UTIL.shutdownMiniCluster();
  }

  @Test
  public void test() throws Exception {
    String peerId = "legacy";
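    // a peer config which still references the legacy region replication endpoint class name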
    ReplicationPeerConfig peerConfig = ReplicationPeerConfig.newBuilder()
      .setClusterKey("127.0.0.1:2181:/hbase")
      .setReplicationEndpointImpl(ReplicationUtils.LEGACY_REGION_REPLICATION_ENDPOINT_NAME).build();
    SingleProcessHBaseCluster cluster = UTIL.getMiniHBaseCluster();
    HMaster master = cluster.getMaster();
    // cannot use Admin.addReplicationPeer as it will fail with ClassNotFound
    master.getReplicationPeerManager().addPeer(peerId, peerConfig, true);
    // add a WAL file to the queue of the legacy region replica replication peer
    ServerName rsName = cluster.getRegionServer(0).getServerName();
    master.getReplicationPeerManager().getQueueStorage().setOffset(
      new ReplicationQueueId(rsName, ServerRegionReplicaUtil.REGION_REPLICA_REPLICATION_PEER), "",
      new ReplicationGroupOffset("test-wal-file", 0), Collections.emptyMap());
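    // bounce the region server so its replication sources are re-initialized on startup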
    cluster.stopRegionServer(0);
    RegionServerThread rst = cluster.startRegionServer();
    // we should still have this peer
    assertNotNull(UTIL.getAdmin().getReplicationPeerConfig(peerId));
    // but on the RS side, the peer should not be loaded as a replication source
    assertTrue(
      rst.getRegionServer().getReplicationSourceService().getReplicationManager().getSources()
        .stream().map(ReplicationSourceInterface::getPeerId).noneMatch(p -> p.equals(peerId)));

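    // restart the whole cluster; after startup the legacy peer should have been removed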
    UTIL.shutdownMiniHBaseCluster();
    UTIL.restartHBaseCluster(1);
    // now we should have removed the peer
    assertThrows(ReplicationPeerNotFoundException.class,
      () -> UTIL.getAdmin().getReplicationPeerConfig("legacy"));

    // make sure that we can finish the SCP
    UTIL.waitFor(15000,
      () -> UTIL.getMiniHBaseCluster().getMaster().getProcedures().stream()
        .filter(p -> p instanceof ServerCrashProcedure).map(p -> (ServerCrashProcedure) p)
        .allMatch(Procedure::isSuccess));
    // the deletion is async, so wait until the peer and its queue data are actually deleted
    ReplicationPeerManager ppm = UTIL.getMiniHBaseCluster().getMaster().getReplicationPeerManager();
    UTIL.waitFor(15000, () -> !ppm.getPeerStorage().listPeerIds().contains(peerId)
      && ppm.getQueueStorage().listAllQueueIds(peerId, rsName).isEmpty());
  }
}