/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.coprocessor;

import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseClassTestRule;
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Durability;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.regionserver.HRegionServer;
import org.apache.hadoop.hbase.testclassification.CoprocessorTests;
import org.apache.hadoop.hbase.testclassification.MediumTests;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.wal.WALEdit;
import org.junit.AfterClass;
import org.junit.BeforeClass;
import org.junit.ClassRule;
import org.junit.Test;
import org.junit.experimental.categories.Category;

/**
 * Tests unhandled exceptions thrown by coprocessors running on a regionserver. The expected
 * result is that the regionserver removes the buggy coprocessor from its set of coprocessors and
 * throws an org.apache.hadoop.hbase.exceptions.DoNotRetryIOException back to the client
 * (HBASE-4014).
 */
@Category({ CoprocessorTests.class, MediumTests.class })
public class TestRegionServerCoprocessorExceptionWithRemove {

  @ClassRule
  public static final HBaseClassTestRule CLASS_RULE =
    HBaseClassTestRule.forClass(TestRegionServerCoprocessorExceptionWithRemove.class);

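  /**
   * A RegionObserver that deliberately throws a NullPointerException from prePut() for
   * "observed_table", simulating a buggy coprocessor.
   */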
  public static class BuggyRegionObserver extends SimpleRegionObserver {
    @SuppressWarnings("null")
    @Override
    public void prePut(final ObserverContext<RegionCoprocessorEnvironment> c, final Put put,
      final WALEdit edit, final Durability durability) {
      String tableName =
        c.getEnvironment().getRegion().getRegionInfo().getTable().getNameAsString();
      if (tableName.equals("observed_table")) {
        // Trigger an NPE to fail the coprocessor
        Integer i = null;
        i = i + 1;
      }
    }
  }

  private static HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();

  @BeforeClass
  public static void setupBeforeClass() throws Exception {
    // Set the configuration to indicate which coprocessor should be loaded
    Configuration conf = TEST_UTIL.getConfiguration();
    conf.set(CoprocessorHost.REGION_COPROCESSOR_CONF_KEY, BuggyRegionObserver.class.getName());
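    // With ABORT_ON_ERROR_KEY set to false, an unhandled coprocessor exception causes the
    // regionserver to remove the coprocessor rather than abort.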
    conf.setBoolean(CoprocessorHost.ABORT_ON_ERROR_KEY, false);
    TEST_UTIL.startMiniCluster();
  }

  @AfterClass
  public static void teardownAfterClass() throws Exception {
    TEST_UTIL.shutdownMiniCluster();
  }

  @Test
  public void testExceptionFromCoprocessorDuringPut() throws IOException, InterruptedException {
    // When we write to TEST_TABLE, the buggy coprocessor throws a NullPointerException from
    // prePut(). Because abort-on-error is disabled, the regionserver that hosts the region we
    // write to should remove the coprocessor and report the failure to the client instead of
    // aborting.

    TableName TEST_TABLE = TableName.valueOf("observed_table");
    byte[] TEST_FAMILY = Bytes.toBytes("aaa");

    Table table = TEST_UTIL.createMultiRegionTable(TEST_TABLE, TEST_FAMILY);
    TEST_UTIL.waitUntilAllRegionsAssigned(TEST_TABLE);
    // Note which regionserver should survive the buggy coprocessor's prePut().
    HRegionServer regionServer = TEST_UTIL.getRSForFirstRegionInTable(TEST_TABLE);

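    // The puts below target "observed_table", so BuggyRegionObserver's prePut() fails; the
    // regionserver is expected to surface this to the client as an IOException
    // (DoNotRetryIOException) while removing the coprocessor.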
    boolean threwIOE = false;
    try {
      final byte[] ROW = Bytes.toBytes("aaa");
      Put put = new Put(ROW);
      put.addColumn(TEST_FAMILY, ROW, ROW);
      table.put(put);
      // We may need two puts to reliably get an exception
      table.put(put);
    } catch (IOException e) {
      threwIOE = true;
    } finally {
      assertTrue("The regionserver should have thrown an exception", threwIOE);
    }

    // Wait 10 seconds and verify the regionserver survives and does not abort.
    for (int i = 0; i < 10; i++) {
      assertFalse(regionServer.isAborted());
      try {
        Thread.sleep(1000);
      } catch (InterruptedException e) {
        fail("InterruptedException while verifying that the regionserver did not abort.");
      }
    }
    table.close();
  }

}