001/**
002 * Licensed to the Apache Software Foundation (ASF) under one
003 * or more contributor license agreements.  See the NOTICE file
004 * distributed with this work for additional information
005 * regarding copyright ownership.  The ASF licenses this file
006 * to you under the Apache License, Version 2.0 (the
007 * "License"); you may not use this file except in compliance
008 * with the License.  You may obtain a copy of the License at
009 *
010 *     http://www.apache.org/licenses/LICENSE-2.0
011 *
012 * Unless required by applicable law or agreed to in writing, software
013 * distributed under the License is distributed on an "AS IS" BASIS,
014 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
015 * See the License for the specific language governing permissions and
016 * limitations under the License.
017 */
018package org.apache.hadoop.hbase.master.assignment;
019
020import static org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.RegionStateTransitionState.REGION_STATE_TRANSITION_CONFIRM_OPENED_VALUE;
021
022import java.io.IOException;
023import java.util.concurrent.CountDownLatch;
024import java.util.concurrent.Future;
025import java.util.concurrent.atomic.AtomicReference;
026import org.apache.hadoop.conf.Configuration;
027import org.apache.hadoop.hbase.HBaseClassTestRule;
028import org.apache.hadoop.hbase.HBaseTestingUtility;
029import org.apache.hadoop.hbase.HConstants;
030import org.apache.hadoop.hbase.PleaseHoldException;
031import org.apache.hadoop.hbase.TableName;
032import org.apache.hadoop.hbase.client.Put;
033import org.apache.hadoop.hbase.client.RegionInfo;
034import org.apache.hadoop.hbase.client.Table;
035import org.apache.hadoop.hbase.master.HMaster;
036import org.apache.hadoop.hbase.master.MasterServices;
037import org.apache.hadoop.hbase.master.RegionPlan;
038import org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv;
039import org.apache.hadoop.hbase.procedure2.ProcedureExecutor;
040import org.apache.hadoop.hbase.testclassification.MasterTests;
041import org.apache.hadoop.hbase.testclassification.MediumTests;
042import org.apache.hadoop.hbase.util.Bytes;
043import org.apache.zookeeper.KeeperException;
044import org.junit.AfterClass;
045import org.junit.BeforeClass;
046import org.junit.ClassRule;
047import org.junit.Test;
048import org.junit.experimental.categories.Category;
049
050import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.ReportRegionStateTransitionRequest;
051import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.ReportRegionStateTransitionResponse;
052
/**
 * Confirm that a region stays healthy and writable when a reportRegionStateTransition call from
 * the region server fails transiently (the report is expected to be retried — TODO confirm
 * against the region server side retry logic).
 */
@Category({ MasterTests.class, MediumTests.class })
public class TestReportRegionStateTransitionRetry {

  @ClassRule
  public static final HBaseClassTestRule CLASS_RULE =
    HBaseClassTestRule.forClass(TestReportRegionStateTransitionRetry.class);

  // Fault injection handle: when non-null, the next reportRegionStateTransition call applies the
  // report, waits on this latch, and then throws PleaseHoldException exactly once (the reference
  // is cleared with getAndSet, so only a single call is affected).
  private static final AtomicReference<CountDownLatch> RESUME_AND_FAIL = new AtomicReference<>();
062  private static final class AssignmentManagerForTest extends AssignmentManager {
063
064    public AssignmentManagerForTest(MasterServices master) {
065      super(master);
066    }
067
068    @Override
069    public ReportRegionStateTransitionResponse reportRegionStateTransition(
070        ReportRegionStateTransitionRequest req) throws PleaseHoldException {
071      ReportRegionStateTransitionResponse resp = super.reportRegionStateTransition(req);
072      CountDownLatch latch = RESUME_AND_FAIL.getAndSet(null);
073      if (latch != null) {
074        try {
075          latch.await();
076        } catch (InterruptedException e) {
077          throw new RuntimeException(e);
078        }
079        throw new PleaseHoldException("Inject error");
080      }
081      return resp;
082    }
083  }
084
085  public static final class HMasterForTest extends HMaster {
086
087    public HMasterForTest(Configuration conf) throws IOException, KeeperException {
088      super(conf);
089    }
090
091    @Override
092    protected AssignmentManager createAssignmentManager(MasterServices master) {
093      return new AssignmentManagerForTest(master);
094    }
095  }
096
097  private static final HBaseTestingUtility UTIL = new HBaseTestingUtility();
098
099  private static TableName NAME = TableName.valueOf("Retry");
100
101  private static byte[] CF = Bytes.toBytes("cf");
102
103  @BeforeClass
104  public static void setUp() throws Exception {
105    UTIL.getConfiguration().setClass(HConstants.MASTER_IMPL, HMasterForTest.class, HMaster.class);
106    UTIL.startMiniCluster(1);
107    UTIL.createTable(NAME, CF);
108    UTIL.waitTableAvailable(NAME);
109  }
110
111  @AfterClass
112  public static void tearDown() throws Exception {
113    UTIL.shutdownMiniCluster();
114  }
115
  /**
   * Move a region onto the same server (forcing a close/open cycle) while injecting a one-shot
   * failure into reportRegionStateTransition, then verify the move still completes and the region
   * remains writable.
   */
  @Test
  public void testRetryOnClose() throws Exception {
    RegionInfo region = UTIL.getMiniHBaseCluster().getRegions(NAME).get(0).getRegionInfo();
    ProcedureExecutor<MasterProcedureEnv> procExec =
      UTIL.getMiniHBaseCluster().getMaster().getMasterProcedureExecutor();
    AssignmentManager am = UTIL.getMiniHBaseCluster().getMaster().getAssignmentManager();
    RegionStateNode rsn = am.getRegionStates().getRegionStateNode(region);

    // Arm the injection: the next reportRegionStateTransition call will be applied, block on this
    // latch, and then throw PleaseHoldException (see AssignmentManagerForTest).
    CountDownLatch latch = new CountDownLatch(1);
    RESUME_AND_FAIL.set(latch);
    // Source and destination are the same server, so this simply drives a close/open transition
    // of the region on that server.
    Future<byte[]> future =
      am.moveAsync(new RegionPlan(region, rsn.getRegionLocation(), rsn.getRegionLocation()));
    // Find the in-flight TRSP created by the move. findAny().get() is safe here only because the
    // move above is the sole unfinished TRSP — TODO confirm no concurrent transitions in this test.
    TransitRegionStateProcedure proc =
      procExec.getProcedures().stream().filter(p -> p instanceof TransitRegionStateProcedure)
        .filter(p -> !p.isFinished()).map(p -> (TransitRegionStateProcedure) p).findAny().get();

    // wait until we schedule the OpenRegionProcedure
    UTIL.waitFor(10000,
      () -> proc.getCurrentStateId() == REGION_STATE_TRANSITION_CONFIRM_OPENED_VALUE);
    // Release the blocked report so it fails with the injected PleaseHoldException, then wait for
    // the move to finish anyway.
    latch.countDown();
    future.get();

    // Confirm that the region can still be written to after the injected report failure. The
    // short write/operation timeouts make a broken region fail the test quickly.
    try (Table table = UTIL.getConnection().getTableBuilder(NAME, null).setWriteRpcTimeout(1000)
      .setOperationTimeout(2000).build()) {
      table.put(
        new Put(Bytes.toBytes("key")).addColumn(CF, Bytes.toBytes("cq"), Bytes.toBytes("val")));
    }
  }
146}