/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.util;

import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertFalse;
import static org.junit.jupiter.api.Assertions.assertTrue;

import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import org.apache.hadoop.hbase.ClientMetaTableAccessor;
import org.apache.hadoop.hbase.HBaseTestingUtil;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.MetaTableAccessor;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.RegionInfo;
import org.apache.hadoop.hbase.client.RegionInfoBuilder;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.master.RegionState;
import org.apache.hadoop.hbase.replication.ReplicationBarrierFamilyFormat;
import org.apache.hadoop.hbase.replication.ReplicationException;
import org.apache.hadoop.hbase.replication.ReplicationGroupOffset;
import org.apache.hadoop.hbase.replication.ReplicationPeerConfig;
import org.apache.hadoop.hbase.replication.ReplicationQueueId;
import org.apache.hadoop.hbase.replication.ReplicationQueueStorage;
import org.apache.hadoop.hbase.replication.ReplicationStorageFactory;
import org.apache.hadoop.hbase.testclassification.MediumTests;
import org.apache.hadoop.hbase.testclassification.ReplicationTests;
import org.apache.hadoop.hbase.util.hbck.HbckTestingUtil;
import org.junit.jupiter.api.AfterAll;
import org.junit.jupiter.api.BeforeAll;
import org.junit.jupiter.api.Tag;
import org.junit.jupiter.api.Test;

import org.apache.hbase.thirdparty.com.google.common.collect.ImmutableMap;

/**
 * Tests that HBCK's "clean replication barrier" action removes replication barriers (and the
 * associated last-pushed sequence ids) from the meta table, both for tables that no longer exist
 * and for tables that still exist, against a single-node mini cluster with two serial
 * replication peers.
 */
@Tag(ReplicationTests.TAG)
@Tag(MediumTests.TAG)
public class TestHBaseFsckCleanReplicationBarriers {

  private static final HBaseTestingUtil UTIL = new HBaseTestingUtil();

  // Two serial replication peers so each region accumulates last-pushed ids for both.
  private static final String PEER_1 = "1";
  private static final String PEER_2 = "2";

  // Initialized in setUp() once the mini cluster connection exists; cannot be final.
  private static ReplicationQueueStorage QUEUE_STORAGE;

  private static final String WAL_FILE_NAME = "test.wal";

  private static final String TABLE_NAME = "test";

  private static final String COLUMN_FAMILY = "info";

  /** Starts a one-region-server mini cluster and registers the two replication peers. */
  @BeforeAll
  public static void setUp() throws Exception {
    UTIL.startMiniCluster(1);
    QUEUE_STORAGE = ReplicationStorageFactory.getReplicationQueueStorage(UTIL.getConnection(),
      UTIL.getConfiguration());
    createPeer();
  }

  @AfterAll
  public static void tearDown() throws Exception {
    UTIL.shutdownMiniCluster();
  }

  /** Cleaning barriers for a table that was never created must be a no-op and report false. */
  @Test
  public void testCleanReplicationBarrierWithNonExistTable()
    throws ClassNotFoundException, IOException {
    TableName tableName = TableName.valueOf(TABLE_NAME + "_non");
    boolean cleaned = HbckTestingUtil.cleanReplicationBarrier(UTIL.getConfiguration(), tableName);
    assertFalse(cleaned);
  }

  /**
   * Writes barrier rows directly into meta for a table that does not exist (simulating a deleted
   * table), then verifies that cleaning removes both the barriers and the last-pushed sequence
   * ids, and that a second clean finds nothing left to do.
   */
  @Test
  public void testCleanReplicationBarrierWithDeletedTable() throws Exception {
    TableName tableName = TableName.valueOf(TABLE_NAME + "_deleted");
    List<RegionInfo> regionInfos = new ArrayList<>();
    // only write some barriers into meta table

    for (int i = 0; i < 110; i++) {
      RegionInfo regionInfo = RegionInfoBuilder.newBuilder(tableName).setStartKey(Bytes.toBytes(i))
        .setEndKey(Bytes.toBytes(i + 1)).build();
      regionInfos.add(regionInfo);
      addStateAndBarrier(regionInfo, RegionState.State.OPEN, 10, 100);
      updatePushedSeqId(regionInfo, 10);
      assertEquals(10, QUEUE_STORAGE.getLastSequenceId(regionInfo.getEncodedName(), PEER_1),
        "check if there is lastPushedId");
      assertEquals(10, QUEUE_STORAGE.getLastSequenceId(regionInfo.getEncodedName(), PEER_2),
        "check if there is lastPushedId");
    }
    // Confirm the barrier rows actually landed in meta before attempting the clean.
    Scan barrierScan = new Scan();
    barrierScan.setCaching(100);
    barrierScan.addFamily(HConstants.REPLICATION_BARRIER_FAMILY);
    barrierScan
      .withStartRow(ClientMetaTableAccessor.getTableStartRowForMeta(tableName,
        ClientMetaTableAccessor.QueryType.REGION))
      .withStopRow(ClientMetaTableAccessor.getTableStopRowForMeta(tableName,
        ClientMetaTableAccessor.QueryType.REGION));
    Result result;
    try (ResultScanner scanner =
      MetaTableAccessor.getMetaHTable(UTIL.getConnection()).getScanner(barrierScan)) {
      while ((result = scanner.next()) != null) {
        assertTrue(ReplicationBarrierFamilyFormat.getReplicationBarriers(result).length > 0);
      }
    }
    boolean cleaned = HbckTestingUtil.cleanReplicationBarrier(UTIL.getConfiguration(), tableName);
    assertTrue(cleaned);
    // After cleaning, last-pushed ids for both peers must be gone (-1 == absent).
    for (RegionInfo regionInfo : regionInfos) {
      assertEquals(-1, QUEUE_STORAGE.getLastSequenceId(regionInfo.getEncodedName(), PEER_1),
        "check if there is lastPushedId");
      assertEquals(-1, QUEUE_STORAGE.getLastSequenceId(regionInfo.getEncodedName(), PEER_2),
        "check if there is lastPushedId");
    }
    // A second clean has nothing to remove, so it must report false.
    cleaned = HbckTestingUtil.cleanReplicationBarrier(UTIL.getConfiguration(), tableName);
    assertFalse(cleaned);
    for (RegionInfo region : regionInfos) {
      assertEquals(0, ReplicationBarrierFamilyFormat.getReplicationBarriers(UTIL.getConnection(),
        region.getRegionName()).length);
    }
  }

  /**
   * Creates a real (local-scope) table, injects barriers for its regions, and verifies cleaning
   * removes barriers and last-pushed sequence ids while the table still exists.
   */
  @Test
  public void testCleanReplicationBarrierWithExistTable() throws Exception {
    TableName tableName = TableName.valueOf(TABLE_NAME);
    String cf = COLUMN_FAMILY;
    TableDescriptor tableDescriptor = TableDescriptorBuilder.newBuilder(tableName)
      .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes(cf)).build())
      .setReplicationScope(HConstants.REPLICATION_SCOPE_LOCAL).build();
    UTIL.createTable(tableDescriptor, Bytes.split(Bytes.toBytes(1), Bytes.toBytes(256), 123));
    assertTrue(UTIL.getAdmin().getRegions(tableName).size() > 0);
    for (RegionInfo region : UTIL.getAdmin().getRegions(tableName)) {
      addStateAndBarrier(region, RegionState.State.OFFLINE, 10, 100);
      updatePushedSeqId(region, 10);
      assertEquals(10, QUEUE_STORAGE.getLastSequenceId(region.getEncodedName(), PEER_1),
        "check if there is lastPushedId");
      assertEquals(10, QUEUE_STORAGE.getLastSequenceId(region.getEncodedName(), PEER_2),
        "check if there is lastPushedId");
    }
    boolean cleaned = HbckTestingUtil.cleanReplicationBarrier(UTIL.getConfiguration(), tableName);
    assertTrue(cleaned);
    for (RegionInfo region : UTIL.getAdmin().getRegions(tableName)) {
      assertEquals(-1, QUEUE_STORAGE.getLastSequenceId(region.getEncodedName(), PEER_1),
        "check if there is lastPushedId");
      assertEquals(-1, QUEUE_STORAGE.getLastSequenceId(region.getEncodedName(), PEER_2),
        "check if there is lastPushedId");
    }
    // A second clean must be a no-op now that everything is removed.
    cleaned = HbckTestingUtil.cleanReplicationBarrier(UTIL.getConfiguration(), tableName);
    assertFalse(cleaned);
    for (RegionInfo region : UTIL.getAdmin().getRegions(tableName)) {
      assertEquals(0, ReplicationBarrierFamilyFormat.getReplicationBarriers(UTIL.getConnection(),
        region.getRegionName()).length);
    }
  }

  /** Registers the two serial replication peers used by all tests. */
  public static void createPeer() throws IOException {
    ReplicationPeerConfig rpc = ReplicationPeerConfig.newBuilder()
      .setClusterKey(UTIL.getZkConnectionURI() + "-test").setSerial(true).build();
    UTIL.getAdmin().addReplicationPeer(PEER_1, rpc);
    UTIL.getAdmin().addReplicationPeer(PEER_2, rpc);
  }

  /**
   * Writes a region state (optional) and one barrier cell per entry of {@code barriers} into the
   * region's meta row. Barrier cells get strictly increasing timestamps ending just below the
   * Put's timestamp so all of them survive in the barrier family.
   */
  private void addStateAndBarrier(RegionInfo region, RegionState.State state, long... barriers)
    throws IOException {
    Put put = new Put(region.getRegionName(), EnvironmentEdgeManager.currentTime());
    if (state != null) {
      put.addColumn(HConstants.CATALOG_FAMILY, HConstants.STATE_QUALIFIER,
        Bytes.toBytes(state.name()));
    }
    for (int i = 0; i < barriers.length; i++) {
      put.addColumn(HConstants.REPLICATION_BARRIER_FAMILY, HConstants.SEQNUM_QUALIFIER,
        put.getTimestamp() - barriers.length + i, Bytes.toBytes(barriers[i]));
    }
    try (Table table = UTIL.getConnection().getTable(TableName.META_TABLE_NAME)) {
      table.put(put);
    }
  }

  /** Records {@code seqId} as the last pushed sequence id for the region under both peers. */
  private void updatePushedSeqId(RegionInfo region, long seqId) throws ReplicationException {
    ServerName sn = UTIL.getMiniHBaseCluster().getRegionServer(0).getServerName();
    QUEUE_STORAGE.setOffset(new ReplicationQueueId(sn, PEER_1), "",
      new ReplicationGroupOffset(WAL_FILE_NAME, 10),
      ImmutableMap.of(region.getEncodedName(), seqId));
    QUEUE_STORAGE.setOffset(new ReplicationQueueId(sn, PEER_2), "",
      new ReplicationGroupOffset(WAL_FILE_NAME, 10),
      ImmutableMap.of(region.getEncodedName(), seqId));
  }
}