/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.regionserver;

import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertTrue;

import java.io.IOException;
import java.util.List;
import java.util.Optional;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseClassTestRule;
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.MiniHBaseCluster;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Durability;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.RegionInfo;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.coprocessor.CoprocessorHost;
import org.apache.hadoop.hbase.coprocessor.CoreCoprocessor;
import org.apache.hadoop.hbase.coprocessor.HasRegionServerServices;
import org.apache.hadoop.hbase.coprocessor.ObserverContext;
import org.apache.hadoop.hbase.coprocessor.RegionCoprocessor;
import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
import org.apache.hadoop.hbase.coprocessor.RegionObserver;
import org.apache.hadoop.hbase.coprocessor.RegionServerCoprocessor;
import org.apache.hadoop.hbase.coprocessor.RegionServerCoprocessorEnvironment;
import org.apache.hadoop.hbase.coprocessor.RegionServerObserver;
import org.apache.hadoop.hbase.master.HMaster;
import org.apache.hadoop.hbase.testclassification.MediumTests;
import org.apache.hadoop.hbase.testclassification.RegionServerTests;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.JVMClusterUtil;
import org.apache.hadoop.hbase.wal.WAL;
import org.apache.hadoop.hbase.wal.WALEdit;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.junit.After;
import org.junit.Before;
import org.junit.ClassRule;
import org.junit.Test;
import org.junit.experimental.categories.Category;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

/**
 * Tests around regionserver shutdown and abort
 */
@Category({RegionServerTests.class, MediumTests.class})
public class TestRegionServerAbort {

  @ClassRule
  public static final HBaseClassTestRule CLASS_RULE =
      HBaseClassTestRule.forClass(TestRegionServerAbort.class);

  private static final byte[] FAMILY_BYTES = Bytes.toBytes("f");

  private static final Logger LOG = LoggerFactory.getLogger(TestRegionServerAbort.class);

  private HBaseTestingUtility testUtil;
  private Configuration conf;
  private MiniDFSCluster dfsCluster;
  private MiniHBaseCluster cluster;

  @Before
  public void setup() throws Exception {
    testUtil = new HBaseTestingUtility();
    conf = testUtil.getConfiguration();
    conf.set(CoprocessorHost.REGIONSERVER_COPROCESSOR_CONF_KEY,
        StopBlockingRegionObserver.class.getName());
    conf.set(CoprocessorHost.REGION_COPROCESSOR_CONF_KEY,
        StopBlockingRegionObserver.class.getName());
    // make sure we have multiple blocks so that the client does not prefetch all block locations
    conf.set("dfs.blocksize", Long.toString(100 * 1024));
    // only prefetch the locations of the first block
    conf.set(DFSConfigKeys.DFS_CLIENT_READ_PREFETCH_SIZE_KEY, Long.toString(100 * 1024));
    conf.set(HConstants.REGION_IMPL, ErrorThrowingHRegion.class.getName());

    testUtil.startMiniZKCluster();
    dfsCluster = testUtil.startMiniDFSCluster(2);
    cluster = testUtil.startMiniHBaseCluster(1, 2);
  }

  @After
  public void tearDown() throws Exception {
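    // Re-enable stops in the coprocessors so the mini cluster can shut down cleanly.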
    String className = StopBlockingRegionObserver.class.getName();
    for (JVMClusterUtil.RegionServerThread t : cluster.getRegionServerThreads()) {
      HRegionServer rs = t.getRegionServer();
      RegionServerCoprocessorHost cpHost = rs.getRegionServerCoprocessorHost();
      StopBlockingRegionObserver cp =
          (StopBlockingRegionObserver) cpHost.findCoprocessor(className);
      cp.setStopAllowed(true);
    }
    HMaster master = cluster.getMaster();
    RegionServerCoprocessorHost host = master.getRegionServerCoprocessorHost();
    if (host != null) {
      StopBlockingRegionObserver obs =
          (StopBlockingRegionObserver) host.findCoprocessor(className);
      if (obs != null) {
        obs.setStopAllowed(true);
      }
    }
    testUtil.shutdownMiniCluster();
  }

  /**
   * Test that a regionserver is able to abort properly, even when a coprocessor
   * throws an exception in preStopRegionServer().
   */
  @Test
  public void testAbortFromRPC() throws Exception {
    TableName tableName = TableName.valueOf("testAbortFromRPC");
    // create a test table
    Table table = testUtil.createTable(tableName, FAMILY_BYTES);

    // write some edits
    testUtil.loadTable(table, FAMILY_BYTES);
    LOG.info("Wrote data");
    // force a flush
    cluster.flushcache(tableName);
    LOG.info("Flushed table");

    // Send a poisoned put to trigger the abort
    Put put = new Put(new byte[]{0, 0, 0, 0});
    put.addColumn(FAMILY_BYTES, Bytes.toBytes("c"), new byte[]{});
    put.setAttribute(StopBlockingRegionObserver.DO_ABORT, new byte[]{1});

    List<HRegion> regions = cluster.findRegionsForTable(tableName);
    HRegion firstRegion = regions.get(0);
    table.put(put);
    // Verify that the regionserver is stopped
    assertNotNull(firstRegion);
    assertNotNull(firstRegion.getRegionServerServices());
    LOG.info("isAborted = " + firstRegion.getRegionServerServices().isAborted());
    assertTrue(firstRegion.getRegionServerServices().isAborted());
    LOG.info("isStopped = " + firstRegion.getRegionServerServices().isStopped());
    assertTrue(firstRegion.getRegionServerServices().isStopped());
  }

  /**
   * Test that a coprocessor is able to override a normal regionserver stop request.
   */
  @Test
  public void testStopOverrideFromCoprocessor() throws Exception {
    Admin admin = testUtil.getHBaseAdmin();
    HRegionServer regionserver = cluster.getRegionServer(0);
    admin.stopRegionServer(regionserver.getServerName().getHostAndPort());

    // regionserver should have failed to stop due to coprocessor
    assertFalse(cluster.getRegionServer(0).isAborted());
    assertFalse(cluster.getRegionServer(0).isStopped());
  }

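  /**
   * Coprocessor that rejects regionserver stop requests until {@link #setStopAllowed(boolean)}
   * is called, and aborts the hosting regionserver when a Put carries the DO_ABORT attribute.
   */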
  @CoreCoprocessor
  public static class StopBlockingRegionObserver
      implements RegionServerCoprocessor, RegionCoprocessor, RegionServerObserver, RegionObserver {
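    /** Put attribute that triggers an abort of the hosting regionserver in prePut. */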
    public static final String DO_ABORT = "DO_ABORT";
    private boolean stopAllowed;

    @Override
    public Optional<RegionObserver> getRegionObserver() {
      return Optional.of(this);
    }

    @Override
    public Optional<RegionServerObserver> getRegionServerObserver() {
      return Optional.of(this);
    }

    @Override
    public void prePut(ObserverContext<RegionCoprocessorEnvironment> c, Put put, WALEdit edit,
                       Durability durability) throws IOException {
      if (put.getAttribute(DO_ABORT) != null) {
        // TODO: Change this so it throws a CP Abort Exception instead.
        RegionServerServices rss =
            ((HasRegionServerServices)c.getEnvironment()).getRegionServerServices();
        String str = "Aborting for test";
        LOG.info(str + " " + rss.getServerName());
        rss.abort(str, new Throwable(str));
      }
    }

    @Override
    public void preStopRegionServer(ObserverContext<RegionServerCoprocessorEnvironment> env)
        throws IOException {
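      // Refuse the stop request until the test explicitly calls setStopAllowed(true).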
      if (!stopAllowed) {
        throw new IOException("Stop not allowed");
      }
    }

    public void setStopAllowed(boolean allowed) {
      this.stopAllowed = allowed;
    }
  }

  /**
   * Throws an exception during store file refresh in order to trigger a regionserver abort.
   */
  public static class ErrorThrowingHRegion extends HRegion {
    public ErrorThrowingHRegion(Path tableDir, WAL wal, FileSystem fs, Configuration confParam,
                                RegionInfo regionInfo, TableDescriptor htd,
                                RegionServerServices rsServices) {
      super(tableDir, wal, fs, confParam, regionInfo, htd, rsServices);
    }

    public ErrorThrowingHRegion(HRegionFileSystem fs, WAL wal, Configuration confParam,
                                TableDescriptor htd, RegionServerServices rsServices) {
      super(fs, wal, confParam, htd, rsServices);
    }

    @Override
    protected boolean refreshStoreFiles(boolean force) throws IOException {
      // forced when called through RegionScannerImpl.handleFileNotFound()
      if (force) {
        throw new IOException("Failing file refresh for testing");
      }
      return super.refreshStoreFiles(force);
    }
  }
}