/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.replication;

import static org.junit.Assert.assertEquals;
import static org.junit.Assert.fail;

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseClassTestRule;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.mapreduce.replication.VerifyReplication;
import org.apache.hadoop.hbase.snapshot.SnapshotTestingUtils;
import org.apache.hadoop.hbase.testclassification.LargeTests;
import org.apache.hadoop.hbase.testclassification.ReplicationTests;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.CommonFSUtils;
import org.apache.hadoop.hbase.util.FSUtils;
import org.apache.hadoop.hbase.zookeeper.MiniZooKeeperCluster;
import org.apache.hadoop.mapreduce.Job;
import org.junit.AfterClass;
import org.junit.Assert;
import org.junit.BeforeClass;
import org.junit.ClassRule;
import org.junit.Test;
import org.junit.experimental.categories.Category;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import org.apache.hbase.thirdparty.com.google.common.collect.ImmutableList;
import org.apache.hbase.thirdparty.com.google.common.collect.ImmutableMap;

@Category({ ReplicationTests.class, LargeTests.class })
public class TestVerifyReplicationCrossDiffHdfs {
  @ClassRule
  public static final HBaseClassTestRule CLASS_RULE =
      HBaseClassTestRule.forClass(TestVerifyReplicationCrossDiffHdfs.class);

  private static final Logger LOG =
      LoggerFactory.getLogger(TestVerifyReplicationCrossDiffHdfs.class);

  private static HBaseTestingUtility util1;
  private static HBaseTestingUtility util2;
  private static HBaseTestingUtility mapReduceUtil = new HBaseTestingUtility();

  private static Configuration conf1 = HBaseConfiguration.create();
  private static Configuration conf2;

  private static final byte[] FAMILY = Bytes.toBytes("f");
  private static final byte[] QUALIFIER = Bytes.toBytes("q");
  private static final String PEER_ID = "1";
  private static final TableName TABLE_NAME = TableName.valueOf("testVerifyRepCrossDiffHDFS");
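
  /**
   * Start two mini HBase clusters that share one mini ZooKeeper cluster (kept apart by the "/1"
   * and "/2" znode parents), create the test table on both, add cluster 2 as a replication peer
   * of cluster 1, load a few rows into the source, and start a mini MapReduce cluster for the
   * verification job.
   */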
  @BeforeClass
  public static void setUpBeforeClass() throws Exception {
    conf1.set(HConstants.ZOOKEEPER_ZNODE_PARENT, "/1");
    util1 = new HBaseTestingUtility(conf1);
    util1.startMiniZKCluster();
    MiniZooKeeperCluster miniZK = util1.getZkCluster();
    conf1 = util1.getConfiguration();

    conf2 = HBaseConfiguration.create(conf1);
    conf2.set(HConstants.ZOOKEEPER_ZNODE_PARENT, "/2");
    util2 = new HBaseTestingUtility(conf2);
    util2.setZkCluster(miniZK);

    util1.startMiniCluster();
    util2.startMiniCluster();

    createTestingTable(util1.getAdmin());
    createTestingTable(util2.getAdmin());
    addTestingPeer();

    LOG.info("Start loading some data into the source cluster.");
    loadSomeData();

    LOG.info("Start the mini MapReduce cluster.");
    mapReduceUtil.setZkCluster(miniZK);
    mapReduceUtil.startMiniMapReduceCluster();
  }

  private static void createTestingTable(Admin admin) throws IOException {
    TableDescriptor table = TableDescriptorBuilder.newBuilder(TABLE_NAME)
        .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(FAMILY).setMaxVersions(100)
            .setScope(HConstants.REPLICATION_SCOPE_GLOBAL).build())
        .build();
    admin.createTable(table);
  }

  private static void addTestingPeer() throws IOException {
    ReplicationPeerConfig rpc = ReplicationPeerConfig.newBuilder()
        .setClusterKey(util2.getClusterKey()).setReplicateAllUserTables(false)
        .setTableCFsMap(ImmutableMap.of(TABLE_NAME, ImmutableList.of())).build();
    util1.getAdmin().addReplicationPeer(PEER_ID, rpc);
  }

  private static void loadSomeData() throws IOException, InterruptedException {
    int numOfRows = 10;
    try (Table table = util1.getConnection().getTable(TABLE_NAME)) {
      for (int i = 0; i < numOfRows; i++) {
        table.put(new Put(Bytes.toBytes(i)).addColumn(FAMILY, QUALIFIER, Bytes.toBytes(i)));
      }
    }
    // Wait until the peer has received those rows.
    Result[] results = null;
    try (Table table = util2.getConnection().getTable(TABLE_NAME)) {
      for (int i = 0; i < 100; i++) {
        try (ResultScanner rs = table.getScanner(new Scan())) {
          results = rs.next(numOfRows);
          if (results == null || results.length < numOfRows) {
            LOG.info("Retrying, waiting until the peer has received all the rows, currentRows="
                + (results == null ? 0 : results.length));
            Thread.sleep(100);
          } else {
            // All rows have been replicated; stop polling.
            break;
          }
        }
      }
    }
    Assert.assertNotNull(results);
    Assert.assertEquals(10, results.length);
  }

  @AfterClass
  public static void tearDownClass() throws Exception {
    if (mapReduceUtil != null) {
      mapReduceUtil.shutdownMiniCluster();
    }
    if (util2 != null) {
      util2.shutdownMiniCluster();
    }
    if (util1 != null) {
      util1.shutdownMiniCluster();
    }
  }
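
  /**
   * Snapshot the test table on both the source and the peer cluster, run VerifyReplication in the
   * mode that reads both snapshots directly from HDFS, and assert that every row is counted as a
   * GOODROW and none as a BADROW.
   */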
  @Test
  public void testVerifyRepBySnapshot() throws Exception {
    // Take the source snapshot.
    Path rootDir = FSUtils.getRootDir(conf1);
    FileSystem fs = rootDir.getFileSystem(conf1);
    String sourceSnapshotName = "sourceSnapshot-" + System.currentTimeMillis();
    SnapshotTestingUtils.createSnapshotAndValidate(util1.getAdmin(), TABLE_NAME,
        Bytes.toString(FAMILY), sourceSnapshotName, rootDir, fs, true);

    // Take the target (peer) snapshot.
    Path peerRootDir = FSUtils.getRootDir(conf2);
    FileSystem peerFs = peerRootDir.getFileSystem(conf2);
    String peerSnapshotName = "peerSnapshot-" + System.currentTimeMillis();
    SnapshotTestingUtils.createSnapshotAndValidate(util2.getAdmin(), TABLE_NAME,
        Bytes.toString(FAMILY), peerSnapshotName, peerRootDir, peerFs, true);

    String peerFSAddress = peerFs.getUri().toString();
    String tempPath1 = new Path(fs.getUri().toString(), "/tmp1").toString();
    String tempPath2 = "/tmp2";

    String[] args = new String[] { "--sourceSnapshotName=" + sourceSnapshotName,
        "--sourceSnapshotTmpDir=" + tempPath1, "--peerSnapshotName=" + peerSnapshotName,
        "--peerSnapshotTmpDir=" + tempPath2, "--peerFSAddress=" + peerFSAddress,
        "--peerHBaseRootAddress=" + FSUtils.getRootDir(conf2), PEER_ID, TABLE_NAME.toString() };

    // Use the YARN config to override the source cluster's config.
    Configuration newConf = HBaseConfiguration.create(conf1);
    HBaseConfiguration.merge(newConf, mapReduceUtil.getConfiguration());
    newConf.set(HConstants.ZOOKEEPER_ZNODE_PARENT, "/1");
    CommonFSUtils.setRootDir(newConf, CommonFSUtils.getRootDir(conf1));
    Job job = new VerifyReplication().createSubmittableJob(newConf, args);
    if (job == null) {
      fail("Job wasn't created, see the log");
    }
    if (!job.waitForCompletion(true)) {
      fail("Job failed, see the log");
    }
    assertEquals(10,
        job.getCounters().findCounter(VerifyReplication.Verifier.Counters.GOODROWS).getValue());
    assertEquals(0,
        job.getCounters().findCounter(VerifyReplication.Verifier.Counters.BADROWS).getValue());
  }
}