/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.regionserver;

import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertTrue;

import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import org.apache.hadoop.hbase.HBaseClassTestRule;
import org.apache.hadoop.hbase.HBaseTestingUtil;
import org.apache.hadoop.hbase.SingleProcessHBaseCluster;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.RegionInfo;
import org.apache.hadoop.hbase.client.RegionLocator;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.master.HMaster;
import org.apache.hadoop.hbase.testclassification.MediumTests;
import org.apache.hadoop.hbase.testclassification.RegionServerTests;
import org.apache.hadoop.hbase.util.Bytes;
import org.junit.ClassRule;
import org.junit.Rule;
import org.junit.Test;
import org.junit.experimental.categories.Category;
import org.junit.rules.TestName;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

/**
 * Tests for {@link HRegion} that need to spin up a cluster. Use {@link TestHRegion} instead if
 * you do not need a cluster and can test with a standalone {@link HRegion}.
 */
@Category({RegionServerTests.class, MediumTests.class})
public class TestHRegionOnCluster {

  @ClassRule
  public static final HBaseClassTestRule CLASS_RULE =
      HBaseClassTestRule.forClass(TestHRegionOnCluster.class);

  private static final Logger LOG = LoggerFactory.getLogger(TestHRegionOnCluster.class);
  private static final HBaseTestingUtil TEST_UTIL = new HBaseTestingUtil();

  @Rule
  public TestName name = new TestName();

  @Test
  public void testDataCorrectnessReplayingRecoveredEdits() throws Exception {
    final int NUM_RS = 3;
    Admin hbaseAdmin = null;
    TEST_UTIL.startMiniCluster(NUM_RS);

    try {
      final TableName tableName = TableName.valueOf(name.getMethodName());
      final byte[] FAMILY = Bytes.toBytes("family");
      SingleProcessHBaseCluster cluster = TEST_UTIL.getHBaseCluster();
      HMaster master = cluster.getMaster();

      // Create table
      TableDescriptor tableDescriptor = TableDescriptorBuilder.newBuilder(tableName)
        .setColumnFamily(ColumnFamilyDescriptorBuilder.of(FAMILY)).build();
      hbaseAdmin = master.getConnection().getAdmin();
      hbaseAdmin.createTable(tableDescriptor);

      assertTrue(hbaseAdmin.isTableAvailable(tableName));

      // Put data: r1->v1
      LOG.info("Loading r1 to v1 into " + tableName);
      Table table = TEST_UTIL.getConnection().getTable(tableName);
      putDataAndVerify(table, "r1", FAMILY, "v1", 1);

      TEST_UTIL.waitUntilAllRegionsAssigned(table.getName());

      // Move region to target server
      RegionInfo regionInfo;
      try (RegionLocator locator = TEST_UTIL.getConnection().getRegionLocator(tableName)) {
        regionInfo = locator.getRegionLocation(Bytes.toBytes("r1")).getRegion();
      }

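      // The server currently hosting the region is the "origin"; the next server in the
      // cluster (mod NUM_RS) is the "target" we move the region to.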
      int originServerNum = cluster.getServerWith(regionInfo.getRegionName());
      HRegionServer originServer = cluster.getRegionServer(originServerNum);
      int targetServerNum = (originServerNum + 1) % NUM_RS;
      HRegionServer targetServer = cluster.getRegionServer(targetServerNum);
      assertFalse(originServer.equals(targetServer));

      TEST_UTIL.waitUntilAllRegionsAssigned(table.getName());
      LOG.info("Moving " + regionInfo.getEncodedName() + " to " + targetServer.getServerName());
      hbaseAdmin.move(regionInfo.getEncodedNameAsBytes(), targetServer.getServerName());
      do {
        Thread.sleep(1);
      } while (cluster.getServerWith(regionInfo.getRegionName()) == originServerNum);

      // Put data: r2->v2
      LOG.info("Loading r2 to v2 into " + tableName);
      putDataAndVerify(table, "r2", FAMILY, "v2", 2);

      TEST_UTIL.waitUntilAllRegionsAssigned(table.getName());
      // Move region back to origin server
      LOG.info("Moving " + regionInfo.getEncodedName() + " to " + originServer.getServerName());
      hbaseAdmin.move(regionInfo.getEncodedNameAsBytes(), originServer.getServerName());
      do {
        Thread.sleep(1);
      } while (cluster.getServerWith(regionInfo.getRegionName()) == targetServerNum);

      // Put data: r3->v3
      LOG.info("Loading r3 to v3 into " + tableName);
      putDataAndVerify(table, "r3", FAMILY, "v3", 3);

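      // Kill both servers that hosted the region so it has to be reopened on the remaining
      // region server, replaying any unflushed edits recovered from the dead servers' WALs.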
      // Kill target server
      LOG.info("Killing target server " + targetServer.getServerName());
      targetServer.kill();
      cluster.getRegionServerThreads().get(targetServerNum).join();
      // Wait until the master has finished processing the dead server
      while (master.getServerManager().areDeadServersInProgress()) {
        Thread.sleep(5);
      }
      // Kill origin server
      LOG.info("Killing origin server " + originServer.getServerName());
      originServer.kill();
      cluster.getRegionServerThreads().get(originServerNum).join();

      // Put data: r4->v4
      LOG.info("Loading r4 to v4 into " + tableName);
      putDataAndVerify(table, "r4", FAMILY, "v4", 4);

    } finally {
      if (hbaseAdmin != null) {
        hbaseAdmin.close();
      }
      TEST_UTIL.shutdownMiniCluster();
    }
  }

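  /**
   * Writes a single cell (row, family:q1 = value) to the table, then scans the whole table and
   * asserts that it contains exactly {@code verifyNum} rows.
   */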
  private void putDataAndVerify(Table table, String row, byte[] family,
      String value, int verifyNum) throws IOException {
    LOG.info("Putting data: " + row);
    Put put = new Put(Bytes.toBytes(row));
    put.addColumn(family, Bytes.toBytes("q1"), Bytes.toBytes(value));
    table.put(put);
    List<Result> results = new ArrayList<>();
    try (ResultScanner resultScanner = table.getScanner(new Scan())) {
      for (Result r = resultScanner.next(); r != null; r = resultScanner.next()) {
        results.add(r);
      }
    }
    if (results.size() != verifyNum) {
      LOG.info("Unexpected scan results: " + results);
    }
    assertEquals(verifyNum, results.size());
  }

}