/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase;

import java.io.IOException;
import java.util.List;
import java.util.concurrent.BlockingQueue;
import java.util.concurrent.TimeUnit;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.testclassification.IntegrationTests;
import org.apache.hadoop.hbase.util.ConstantDelayQueue;
import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
import org.apache.hadoop.hbase.util.LoadTestTool;
import org.apache.hadoop.hbase.util.MultiThreadedUpdater;
import org.apache.hadoop.hbase.util.MultiThreadedWriter;
import org.apache.hadoop.hbase.util.ServerRegionReplicaUtil;
import org.apache.hadoop.hbase.util.TableDescriptorChecker;
import org.apache.hadoop.hbase.util.Threads;
import org.apache.hadoop.hbase.util.test.LoadTestDataGenerator;
import org.apache.hadoop.util.StringUtils;
import org.apache.hadoop.util.ToolRunner;
import org.junit.Assert;
import org.junit.Test;
import org.junit.experimental.categories.Category;

import org.apache.hbase.thirdparty.com.google.common.collect.Lists;

/**
 * Integration test for async wal replication to secondary region replicas. Sets up a table with
 * the given region replication (default 2), and uses LoadTestTool client writer, updater and
 * reader threads for writes, updates and verified reads. A delay queue with a configurable delay
 * ("read_delay_ms", default 5000 ms) sits between the writer/updater and reader threads, so a
 * reader only starts reading a row after the delay has elapsed since the row was written or
 * updated. The reader threads read from the given region replica id (default 1). Async wal
 * replication therefore has to replicate the edits to that region replica within read_delay_ms,
 * otherwise the read-and-verify step fails. The job runs for <b>at least</b> the given runtime
 * (default 10 min) by running a concurrent writer and reader workload followed by a concurrent
 * updater and reader workload for num_keys_per_server keys.
 * <p>
 * Example usage:
 * </p>
 *
 * <pre>
 * hbase org.apache.hadoop.hbase.IntegrationTestRegionReplicaReplication
 * -DIntegrationTestRegionReplicaReplication.num_keys_per_server=10000
 * -Dhbase.IntegrationTestRegionReplicaReplication.runtime=600000
 * -DIntegrationTestRegionReplicaReplication.read_delay_ms=5000
 * -DIntegrationTestRegionReplicaReplication.region_replication=3
 * -DIntegrationTestRegionReplicaReplication.region_replica_id=2
 * -DIntegrationTestRegionReplicaReplication.num_read_threads=100
 * -DIntegrationTestRegionReplicaReplication.num_write_threads=100
 * </pre>
 */
@Category(IntegrationTests.class)
public class IntegrationTestRegionReplicaReplication extends IntegrationTestIngest {

  private static final String TEST_NAME =
    IntegrationTestRegionReplicaReplication.class.getSimpleName();

  private static final String OPT_READ_DELAY_MS = "read_delay_ms";

  private static final int DEFAULT_REGION_REPLICATION = 2;
  private static final int SERVER_COUNT = 1; // number of slaves for the smallest cluster
  private static final String[] DEFAULT_COLUMN_FAMILIES = new String[] { "f1", "f2", "f3" };

  @Override
  protected int getMinServerCount() {
    return SERVER_COUNT;
  }

  @Override
  public void setConf(Configuration conf) {
    conf.setIfUnset(String.format("%s.%s", TEST_NAME, LoadTestTool.OPT_REGION_REPLICATION),
      String.valueOf(DEFAULT_REGION_REPLICATION));

    conf.setIfUnset(String.format("%s.%s", TEST_NAME, LoadTestTool.OPT_COLUMN_FAMILIES),
      StringUtils.join(",", DEFAULT_COLUMN_FAMILIES));

    conf.setBoolean(TableDescriptorChecker.TABLE_SANITY_CHECKS, true);

    // enable async wal replication to region replicas for unit tests
    conf.setBoolean(ServerRegionReplicaUtil.REGION_REPLICA_REPLICATION_CONF_KEY, true);

    conf.setLong(HConstants.HREGION_MEMSTORE_FLUSH_SIZE, 1024L * 1024 * 4); // flush every 4 MB
    conf.setInt("hbase.hstore.blockingStoreFiles", 100);

    super.setConf(conf);
  }

  @Override
  @Test
  public void testIngest() throws Exception {
    runIngestTest(JUNIT_RUN_TIME, 25000, 10, 1024, 10, 20);
  }

  /**
   * Extends MultiThreadedWriter to add a configurable delay before the keys written by the writer
   * threads become available to the MultiThreadedReader threads. The delay is needed because of
   * the async nature of the wal replication to region replicas.
   */
  public static class DelayingMultiThreadedWriter extends MultiThreadedWriter {
    private long delayMs;

    public DelayingMultiThreadedWriter(LoadTestDataGenerator dataGen, Configuration conf,
      TableName tableName) throws IOException {
      super(dataGen, conf, tableName);
    }

    @Override
    protected BlockingQueue<Long> createWriteKeysQueue(Configuration conf) {
      this.delayMs = conf.getLong(String.format("%s.%s",
        IntegrationTestRegionReplicaReplication.class.getSimpleName(), OPT_READ_DELAY_MS), 5000);
      return new ConstantDelayQueue<>(TimeUnit.MILLISECONDS, delayMs);
    }
  }

  /**
   * Extends MultiThreadedUpdater to add a configurable delay before the keys updated by the
   * updater threads become available to the MultiThreadedReader threads. The delay is needed
   * because of the async nature of the wal replication to region replicas.
   */
  public static class DelayingMultiThreadedUpdater extends MultiThreadedUpdater {
    private long delayMs;

    public DelayingMultiThreadedUpdater(LoadTestDataGenerator dataGen, Configuration conf,
      TableName tableName, double updatePercent) throws IOException {
      super(dataGen, conf, tableName, updatePercent);
    }

    @Override
    protected BlockingQueue<Long> createWriteKeysQueue(Configuration conf) {
      this.delayMs = conf.getLong(String.format("%s.%s",
        IntegrationTestRegionReplicaReplication.class.getSimpleName(), OPT_READ_DELAY_MS), 5000);
      return new ConstantDelayQueue<>(TimeUnit.MILLISECONDS, delayMs);
    }
  }

  @Override
  protected void runIngestTest(long defaultRunTime, long keysPerServerPerIter, int colsPerKey,
    int recordSize, int writeThreads, int readThreads) throws Exception {

    LOG.info("Running ingest");
    LOG.info("Cluster size:"
      + util.getHBaseClusterInterface().getClusterMetrics().getLiveServerMetrics().size());

    // sleep for some time so that the cache for disabled tables does not interfere.
    Threads.sleep(getConf().getInt(
      "hbase.region.replica.replication.cache.disabledAndDroppedTables.expiryMs", 5000) + 1000);

    long start = EnvironmentEdgeManager.currentTime();
    String runtimeKey = String.format(RUN_TIME_KEY, this.getClass().getSimpleName());
    long runtime = util.getConfiguration().getLong(runtimeKey, defaultRunTime);
    long startKey = 0;

    long numKeys = getNumKeys(keysPerServerPerIter);
    while (EnvironmentEdgeManager.currentTime() - start < 0.9 * runtime) {
      LOG.info("Intended run time: " + (runtime / 60000) + " min, left:"
        + ((runtime - (EnvironmentEdgeManager.currentTime() - start)) / 60000) + " min");

      int verifyPercent = 100;
      int updatePercent = 20;
      int regionReplicaId =
        conf.getInt(String.format("%s.%s", TEST_NAME, LoadTestTool.OPT_REGION_REPLICA_ID), 1);

      // we will run writers and readers at the same time.
      List<String> args = Lists.newArrayList(getArgsForLoadTestTool("", "", startKey, numKeys));
      args.add("-write");
      args.add(String.format("%d:%d:%d", colsPerKey, recordSize, writeThreads));
      args.add("-" + LoadTestTool.OPT_MULTIPUT);
      args.add("-writer");
      args.add(DelayingMultiThreadedWriter.class.getName()); // inject writer class
      args.add("-read");
      args.add(String.format("%d:%d", verifyPercent, readThreads));
      args.add("-" + LoadTestTool.OPT_REGION_REPLICA_ID);
      args.add(String.valueOf(regionReplicaId));

      int ret = loadTool.run(args.toArray(new String[args.size()]));
      if (0 != ret) {
        String errorMsg = "Load failed with error code " + ret;
        LOG.error(errorMsg);
        Assert.fail(errorMsg);
      }

      // then run updaters and readers at the same time.
      args = Lists.newArrayList(getArgsForLoadTestTool("", "", startKey, numKeys));
      args.add("-update");
      args.add(String.format("%s:%s:1", updatePercent, writeThreads));
      args.add("-updater");
      args.add(DelayingMultiThreadedUpdater.class.getName()); // inject updater class
      args.add("-read");
      args.add(String.format("%d:%d", verifyPercent, readThreads));
      args.add("-" + LoadTestTool.OPT_REGION_REPLICA_ID);
      args.add(String.valueOf(regionReplicaId));

      ret = loadTool.run(args.toArray(new String[args.size()]));
      if (0 != ret) {
        String errorMsg = "Load failed with error code " + ret;
        LOG.error(errorMsg);
        Assert.fail(errorMsg);
      }
      startKey += numKeys;
    }
  }

  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    IntegrationTestingUtility.setUseDistributedCluster(conf);
    int ret = ToolRunner.run(conf, new IntegrationTestRegionReplicaReplication(), args);
    System.exit(ret);
  }
}