/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.replication;

import static org.hamcrest.CoreMatchers.containsString;
import static org.hamcrest.MatcherAssert.assertThat;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;

import java.io.IOException;
import java.util.Arrays;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.DoNotRetryIOException;
import org.apache.hadoop.hbase.HBaseClassTestRule;
import org.apache.hadoop.hbase.client.Append;
import org.apache.hadoop.hbase.client.Delete;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.Mutation;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.RetriesExhaustedException;
import org.apache.hadoop.hbase.client.RowMutations;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.master.MasterFileSystem;
import org.apache.hadoop.hbase.testclassification.LargeTests;
import org.apache.hadoop.hbase.testclassification.ReplicationTests;
import org.apache.hadoop.hbase.util.Bytes;
import org.junit.ClassRule;
import org.junit.Test;
import org.junit.experimental.categories.Category;

@Category({ ReplicationTests.class, LargeTests.class })
public class TestSyncReplicationStandBy extends SyncReplicationTestBase {

  @ClassRule
  public static final HBaseClassTestRule CLASS_RULE =
    HBaseClassTestRule.forClass(TestSyncReplicationStandBy.class);

  @FunctionalInterface
  private interface TableAction {

    void call(Table table) throws IOException;
  }

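  /**
   * Assert that the given table operation is rejected by the standby cluster, i.e. it fails with a
   * {@link DoNotRetryIOException} or {@link RetriesExhaustedException} whose message mentions the
   * STANDBY state.
   */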
  private void assertDisallow(Table table, TableAction action) throws IOException {
    try {
      action.call(table);
      fail("Should not allow the action");
    } catch (DoNotRetryIOException | RetriesExhaustedException e) {
      // expected
      assertThat(e.getMessage(), containsString("STANDBY"));
    }
  }

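  /**
   * Walk a peer through the STANDBY life cycle: verify that the remote WAL dir is created on the
   * transition to STANDBY, that client reads and writes are rejected while replication writes
   * still go through, and that the remote WAL dirs are cleaned up when the peer is downgraded and
   * finally removed.
   */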
  @Test
  public void testStandby() throws Exception {
    MasterFileSystem mfs = UTIL2.getHBaseCluster().getMaster().getMasterFileSystem();
    Path remoteWALDir = getRemoteWALDir(mfs, PEER_ID);
    assertFalse(mfs.getWALFileSystem().exists(remoteWALDir));
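    // Transiting the peer to STANDBY should create the remote WAL dir.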
    UTIL2.getAdmin().transitReplicationPeerSyncReplicationState(PEER_ID,
      SyncReplicationState.STANDBY);
    assertTrue(mfs.getWALFileSystem().exists(remoteWALDir));
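    // All client operations against the standby cluster, single and batched, should be rejected.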
    try (Table table = UTIL2.getConnection().getTable(TABLE_NAME)) {
      assertDisallow(table, t -> t.get(new Get(Bytes.toBytes("row"))));
      assertDisallow(table,
        t -> t.put(new Put(Bytes.toBytes("row")).addColumn(CF, CQ, Bytes.toBytes("row"))));
      assertDisallow(table, t -> t.delete(new Delete(Bytes.toBytes("row"))));
      assertDisallow(table, t -> t.incrementColumnValue(Bytes.toBytes("row"), CF, CQ, 1));
      assertDisallow(table,
        t -> t.append(new Append(Bytes.toBytes("row")).addColumn(CF, CQ, Bytes.toBytes("row"))));
      assertDisallow(table,
        t -> t.get(Arrays.asList(new Get(Bytes.toBytes("row")), new Get(Bytes.toBytes("row1")))));
      assertDisallow(table,
        t -> t
          .put(Arrays.asList(new Put(Bytes.toBytes("row")).addColumn(CF, CQ, Bytes.toBytes("row")),
            new Put(Bytes.toBytes("row1")).addColumn(CF, CQ, Bytes.toBytes("row1")))));
      assertDisallow(table, t -> t.mutateRow(new RowMutations(Bytes.toBytes("row"))
        .add((Mutation) new Put(Bytes.toBytes("row")).addColumn(CF, CQ, Bytes.toBytes("row")))));
    }
    // We should still allow replication writes
    writeAndVerifyReplication(UTIL1, UTIL2, 0, 100);

    // The remote WAL dir for the peer should still exist before we transit away from STANDBY.
    FileSystem fs2 = REMOTE_WAL_DIR2.getFileSystem(UTIL2.getConfiguration());
    assertTrue(fs2.exists(getRemoteWALDir(REMOTE_WAL_DIR2, PEER_ID)));

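    // Transiting from STANDBY to DOWNGRADE_ACTIVE should remove both the remote WAL dir and the
    // replay WAL dir.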
    UTIL2.getAdmin().transitReplicationPeerSyncReplicationState(PEER_ID,
      SyncReplicationState.DOWNGRADE_ACTIVE);
    assertFalse(fs2.exists(getRemoteWALDir(REMOTE_WAL_DIR2, PEER_ID)));
    assertFalse(fs2.exists(getReplayRemoteWALs(REMOTE_WAL_DIR2, PEER_ID)));

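    // Remove the peer in the ACTIVE cluster and verify that its remote WAL dir is cleaned up.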
    UTIL1.getAdmin().removeReplicationPeer(PEER_ID);
    verifyRemovedPeer(PEER_ID, REMOTE_WAL_DIR1, UTIL1);

    // The peer remote WAL dir will be renamed to the replay WAL dir when transiting from STANDBY
    // to DOWNGRADE_ACTIVE, and the replay WAL dir will be removed after replaying all WALs, so
    // create empty dirs here to test whether removeReplicationPeer removes the remote WAL dir.
    fs2.create(getRemoteWALDir(REMOTE_WAL_DIR2, PEER_ID));
    fs2.create(getReplayRemoteWALs(REMOTE_WAL_DIR2, PEER_ID));
    assertTrue(fs2.exists(getRemoteWALDir(REMOTE_WAL_DIR2, PEER_ID)));
    assertTrue(fs2.exists(getReplayRemoteWALs(REMOTE_WAL_DIR2, PEER_ID)));
    UTIL2.getAdmin().removeReplicationPeer(PEER_ID);
    verifyRemovedPeer(PEER_ID, REMOTE_WAL_DIR2, UTIL2);
  }
}