/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.client;

import static org.apache.hadoop.hbase.HConstants.HBASE_CLIENT_META_OPERATION_TIMEOUT;
import static org.apache.hadoop.hbase.master.LoadBalancer.TABLES_ON_MASTER;
import static org.junit.Assert.assertEquals;

import java.io.IOException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.List;
import java.util.Random;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.stream.Collectors;
import java.util.stream.IntStream;
import org.apache.commons.io.IOUtils;
import org.apache.hadoop.hbase.HBaseClassTestRule;
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.MemoryCompactionPolicy;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.Waiter.ExplainingPredicate;
import org.apache.hadoop.hbase.io.ByteBufferPool;
import org.apache.hadoop.hbase.regionserver.CompactingMemStore;
import org.apache.hadoop.hbase.regionserver.HRegion;
import org.apache.hadoop.hbase.testclassification.ClientTests;
import org.apache.hadoop.hbase.testclassification.LargeTests;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.RetryCounter;
import org.apache.hadoop.hbase.util.Threads;
import org.junit.AfterClass;
import org.junit.BeforeClass;
import org.junit.ClassRule;
import org.junit.Test;
import org.junit.experimental.categories.Category;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

/**
 * Will split the table, compact the regions and move regions randomly while the read threads are
 * running, to verify that concurrent gets keep returning the correct results.
 */
@Category({ LargeTests.class, ClientTests.class })
public class TestAsyncTableGetMultiThreaded {

  @ClassRule
  public static final HBaseClassTestRule CLASS_RULE =
    HBaseClassTestRule.forClass(TestAsyncTableGetMultiThreaded.class);

  private static final Logger LOG = LoggerFactory.getLogger(TestAsyncTableGetMultiThreaded.class);

  private static final HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();

  private static TableName TABLE_NAME = TableName.valueOf("async");

  private static byte[] FAMILY = Bytes.toBytes("cf");

  private static byte[] QUALIFIER = Bytes.toBytes("cq");

  private static int COUNT = 1000;

  private static AsyncConnection CONN;

  private static AsyncTable<?> TABLE;

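  // Split points used to split the test table into more regions while the read threads are
  // running; they are shuffled in test() so the splits happen in a random order.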
  private static byte[][] SPLIT_KEYS;

  @BeforeClass
  public static void setUp() throws Exception {
    setUp(MemoryCompactionPolicy.NONE);
  }

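  /**
   * Starts the mini cluster with the given in-memory compaction policy, creates the test table and
   * loads {@code COUNT} rows whose values are the integer form of their zero-padded row keys. Kept
   * protected so that subclasses can run the same test with a different
   * {@link MemoryCompactionPolicy}.
   */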
  protected static void setUp(MemoryCompactionPolicy memoryCompaction) throws Exception {
    TEST_UTIL.getConfiguration().set(TABLES_ON_MASTER, "none");
    TEST_UTIL.getConfiguration().setLong(HBASE_CLIENT_META_OPERATION_TIMEOUT, 60000L);
    TEST_UTIL.getConfiguration().setInt(ByteBufferPool.MAX_POOL_SIZE_KEY, 100);
    TEST_UTIL.getConfiguration().set(CompactingMemStore.COMPACTING_MEMSTORE_TYPE_KEY,
      String.valueOf(memoryCompaction));

    TEST_UTIL.startMiniCluster(5);
    SPLIT_KEYS = new byte[8][];
    for (int i = 111; i < 999; i += 111) {
      SPLIT_KEYS[i / 111 - 1] = Bytes.toBytes(String.format("%03d", i));
    }
    TEST_UTIL.createTable(TABLE_NAME, FAMILY);
    TEST_UTIL.waitTableAvailable(TABLE_NAME);
    CONN = ConnectionFactory.createAsyncConnection(TEST_UTIL.getConfiguration()).get();
    TABLE = CONN.getTableBuilder(TABLE_NAME).setReadRpcTimeout(1, TimeUnit.SECONDS)
        .setMaxRetries(1000).build();
    TABLE.putAll(
      IntStream.range(0, COUNT).mapToObj(i -> new Put(Bytes.toBytes(String.format("%03d", i)))
          .addColumn(FAMILY, QUALIFIER, Bytes.toBytes(i))).collect(Collectors.toList()))
        .get();
  }

  @AfterClass
  public static void tearDown() throws Exception {
    IOUtils.closeQuietly(CONN);
    TEST_UTIL.shutdownMiniCluster();
  }

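  /**
   * Read loop executed by each worker thread: keeps verifying that every row still returns its
   * expected value until {@code stop} is set. The table is configured with a short read RPC
   * timeout and a high retry count, so the gets are expected to keep succeeding even while regions
   * are splitting and moving.
   */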
  private void run(AtomicBoolean stop) throws InterruptedException, ExecutionException {
    while (!stop.get()) {
      for (int i = 0; i < COUNT; i++) {
        assertEquals(i, Bytes.toInt(TABLE.get(new Get(Bytes.toBytes(String.format("%03d", i))))
            .get().getValue(FAMILY, QUALIFIER)));
      }
      // sleep a bit so we do not add too much load to the test machine, as we have 20 threads here
      Thread.sleep(10);
    }
  }

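  /**
   * Starts the read threads, then repeatedly splits the table, major compacts the regions so split
   * references are cleaned up, archives the compacted files, rebalances the cluster and moves the
   * meta region, verifying that the concurrent gets never see a wrong or missing value.
   */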
  @Test
  public void test() throws Exception {
    LOG.info("====== Test started ======");
    int numThreads = 20;
    AtomicBoolean stop = new AtomicBoolean(false);
    ExecutorService executor =
      Executors.newFixedThreadPool(numThreads, Threads.newDaemonThreadFactory("TestAsyncGet-"));
    List<Future<?>> futures = new ArrayList<>();
    IntStream.range(0, numThreads).forEach(i -> futures.add(executor.submit(() -> {
      run(stop);
      return null;
    })));
    LOG.info("====== Scheduled {} read threads ======", numThreads);
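    // Shuffle the split points with a fixed seed so the splits happen in a random but reproducible
    // order while the read threads are running.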
    Collections.shuffle(Arrays.asList(SPLIT_KEYS), new Random(123));
    Admin admin = TEST_UTIL.getAdmin();
    for (byte[] splitPoint : SPLIT_KEYS) {
      int oldRegionCount = admin.getRegions(TABLE_NAME).size();
      LOG.info("====== Splitting at {} ======, region count before splitting is {}",
        Bytes.toStringBinary(splitPoint), oldRegionCount);
      admin.split(TABLE_NAME, splitPoint);
      TEST_UTIL.waitFor(30000, new ExplainingPredicate<Exception>() {
        @Override
        public boolean evaluate() throws Exception {
          return TEST_UTIL.getMiniHBaseCluster().getRegions(TABLE_NAME).size() > oldRegionCount;
        }

        @Override
        public String explainFailure() throws Exception {
          return "Split has not finished yet";
        }
      });
      List<HRegion> regions = TEST_UTIL.getMiniHBaseCluster().getRegions(TABLE_NAME);
      LOG.info("====== Split at {} ======, region count after splitting is {}",
        Bytes.toStringBinary(splitPoint), regions.size());
      for (HRegion region : regions) {
        LOG.info("====== Compact {} ======", region.getRegionInfo());
        region.compact(true);
      }
      for (HRegion region : regions) {
        // Wait for the compaction to complete and the references to be cleaned up
        LOG.info("====== Waiting for compaction on {} ======", region.getRegionInfo());
        RetryCounter retrier = new RetryCounter(30, 1, TimeUnit.SECONDS);
        for (;;) {
          try {
            if (admin.getCompactionStateForRegion(
              region.getRegionInfo().getRegionName()) == CompactionState.NONE) {
              break;
            }
          } catch (IOException e) {
            LOG.warn("Failed to query compaction state", e);
          }
          if (!retrier.shouldRetry()) {
            throw new IOException("Can not finish compaction in time after " +
              retrier.getAttemptTimes() + " attempts");
          }
          retrier.sleepUntilNextRetry();
        }
        LOG.info("====== Compaction on {} finished, close and archive compacted files ======",
          region.getRegionInfo());
        region.getStores().get(0).closeAndArchiveCompactedFiles();
        LOG.info("====== Close and archive compacted files on {} done ======",
          region.getRegionInfo());
      }
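      // Give the read threads some time to run against the new region layout before rebalancing.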
      Thread.sleep(5000);
      LOG.info("====== Balancing cluster ======");
      admin.balance(true);
      LOG.info("====== Balance cluster done ======");
      Thread.sleep(5000);
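      // Move the meta region to a different region server so that clients also have to re-locate
      // meta while reading.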
      ServerName metaServer = TEST_UTIL.getHBaseCluster().getServerHoldingMeta();
      ServerName newMetaServer = TEST_UTIL.getHBaseCluster().getRegionServerThreads().stream()
          .map(t -> t.getRegionServer().getServerName()).filter(s -> !s.equals(metaServer))
          .findAny().get();
      LOG.info("====== Moving meta from {} to {} ======", metaServer, newMetaServer);
      admin.move(RegionInfoBuilder.FIRST_META_REGIONINFO.getEncodedNameAsBytes(), newMetaServer);
      LOG.info("====== Move meta done ======");
      Thread.sleep(5000);
    }
    LOG.info("====== Read test finished, shutdown thread pool ======");
    stop.set(true);
    executor.shutdown();
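    // Wait for every read thread to finish; Future.get() rethrows any assertion error or exception
    // raised in a thread, which is what actually fails the test on a bad read.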
    for (int i = 0; i < numThreads; i++) {
      LOG.info("====== Waiting for {} threads to finish, remaining {} ======", numThreads,
        numThreads - i);
      futures.get(i).get();
    }
    LOG.info("====== Test finished ======");
  }
}