/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.regionserver;

import static org.apache.hadoop.hbase.HBaseTestingUtility.fam1;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertNotNull;

import java.io.IOException;
import org.apache.hadoop.hbase.HBaseClassTestRule;
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.MiniHBaseCluster;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Durability;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.RowMutations;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.testclassification.MediumTests;
import org.apache.hadoop.hbase.testclassification.RegionServerTests;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
import org.junit.After;
import org.junit.AfterClass;
import org.junit.Before;
import org.junit.BeforeClass;
import org.junit.ClassRule;
import org.junit.Test;
import org.junit.experimental.categories.Category;

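/**
 * Verifies that edits written through {@link Table#mutateRow(RowMutations)} and
 * {@link Table#put(Put)} with {@link Durability#SYNC_WAL} remain readable after the region
 * server hosting the row is killed and the region's WAL edits are replayed on reassignment.
 */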
@Category({ RegionServerTests.class, MediumTests.class })
public class TestMutateRowsRecovery {

  @ClassRule
  public static final HBaseClassTestRule CLASS_RULE =
      HBaseClassTestRule.forClass(TestMutateRowsRecovery.class);

  private MiniHBaseCluster cluster = null;
  private Connection connection = null;
  private static final int NB_SERVERS = 3;

  static final byte[] qual1 = Bytes.toBytes("qual1");
  static final byte[] qual2 = Bytes.toBytes("qual2");
  static final byte[] value1 = Bytes.toBytes("value1");
  static final byte[] value2 = Bytes.toBytes("value2");
  static final byte[] row1 = Bytes.toBytes("rowA");
  static final byte[] row2 = Bytes.toBytes("rowB");

  static final HBaseTestingUtility TESTING_UTIL = new HBaseTestingUtility();

  @BeforeClass
  public static void before() throws Exception {
    TESTING_UTIL.startMiniCluster(NB_SERVERS);
  }

  @AfterClass
  public static void after() throws Exception {
    TESTING_UTIL.shutdownMiniCluster();
  }

  @Before
  public void setup() throws IOException {
    TESTING_UTIL.ensureSomeNonStoppedRegionServersAvailable(NB_SERVERS);
    this.connection = ConnectionFactory.createConnection(TESTING_UTIL.getConfiguration());
    this.cluster = TESTING_UTIL.getMiniHBaseCluster();
  }

  @After
  public void tearDown() throws IOException {
    if (this.connection != null) {
      this.connection.close();
    }
  }

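  /**
   * Writes one column through {@code mutateRow} and a second through a plain {@code put}, both
   * with SYNC_WAL durability, reports the region server load to the master, kills the region
   * server hosting the row, and then asserts that both values are still readable once the
   * region comes back online.
   */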
  @Test
  public void testMutateRowsAndCheckPostKill() throws IOException, InterruptedException {
    final TableName tableName = TableName.valueOf("test");
    Admin admin = null;
    Table hTable = null;
    try {
      admin = connection.getAdmin();
      hTable = connection.getTable(tableName);
      HTableDescriptor desc = new HTableDescriptor(tableName);
      desc.addFamily(new HColumnDescriptor(fam1));
      admin.createTable(desc);

      // Write the first column through a RowMutations (multi) operation
      RowMutations rm = new RowMutations(row1);
      Put p1 = new Put(row1);
      p1.addColumn(fam1, qual1, value1);
      p1.setDurability(Durability.SYNC_WAL);
      rm.add(p1);
      hTable.mutateRow(rm);

      // Write the second column through a plain Put
      Put p2 = new Put(row1);
      p2.addColumn(fam1, qual2, value2);
      p2.setDurability(Durability.SYNC_WAL);
      hTable.put(p2);

      HRegionServer rs1 = TESTING_UTIL.getRSForFirstRegionInTable(tableName);
      long now = EnvironmentEdgeManager.currentTime();
      // Report the region server load so the master records the correct last flushed
      // sequence id for each store
      rs1.tryRegionServerReport(now - 30000, now);
      // Kill the region server so the region is reassigned and its WAL edits are replayed
      cluster.killRegionServer(rs1.serverName);

      // Both edits should still be readable: the client retries the Get until the region is
      // reassigned and its WAL edits have been replayed
      Get g1 = new Get(row1);
      Result result = hTable.get(g1);
      assertNotNull(result.getValue(fam1, qual1));
      assertEquals(0, Bytes.compareTo(result.getValue(fam1, qual1), value1));
      assertNotNull(result.getValue(fam1, qual2));
      assertEquals(0, Bytes.compareTo(result.getValue(fam1, qual2), value2));
    } finally {
      if (admin != null) {
        admin.close();
      }
      if (hTable != null) {
        hTable.close();
      }
    }
  }
}