001/*
002 * Licensed to the Apache Software Foundation (ASF) under one
003 * or more contributor license agreements.  See the NOTICE file
004 * distributed with this work for additional information
005 * regarding copyright ownership.  The ASF licenses this file
006 * to you under the Apache License, Version 2.0 (the
007 * "License"); you may not use this file except in compliance
008 * with the License.  You may obtain a copy of the License at
009 *
010 *     http://www.apache.org/licenses/LICENSE-2.0
011 *
012 * Unless required by applicable law or agreed to in writing, software
013 * distributed under the License is distributed on an "AS IS" BASIS,
014 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
015 * See the License for the specific language governing permissions and
016 * limitations under the License.
017 */
018package org.apache.hadoop.hbase.regionserver.handler;
019
020import edu.umd.cs.findbugs.annotations.Nullable;
021import java.io.IOException;
022import java.util.concurrent.TimeUnit;
023import org.apache.hadoop.conf.Configuration;
024import org.apache.hadoop.hbase.HConstants;
025import org.apache.hadoop.hbase.TableName;
026import org.apache.hadoop.hbase.client.RegionInfo;
027import org.apache.hadoop.hbase.client.RegionReplicaUtil;
028import org.apache.hadoop.hbase.client.TableDescriptor;
029import org.apache.hadoop.hbase.executor.EventHandler;
030import org.apache.hadoop.hbase.executor.EventType;
031import org.apache.hadoop.hbase.regionserver.HRegion;
032import org.apache.hadoop.hbase.regionserver.HRegionServer;
033import org.apache.hadoop.hbase.regionserver.Region;
034import org.apache.hadoop.hbase.regionserver.RegionServerServices.PostOpenDeployContext;
035import org.apache.hadoop.hbase.regionserver.RegionServerServices.RegionStateTransitionContext;
036import org.apache.hadoop.hbase.util.RetryCounter;
037import org.apache.hadoop.hbase.util.ServerRegionReplicaUtil;
038import org.apache.yetus.audience.InterfaceAudience;
039import org.slf4j.Logger;
040import org.slf4j.LoggerFactory;
041import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionStateTransition.TransitionCode;
042
043/**
044 * Handles opening of a region on a region server.
045 * <p/>
 * Does the same thing as the old {@link OpenRegionHandler}, with some modifications on fencing
 * and retrying. But we need to keep the {@link OpenRegionHandler} as is to stay compatible with
 * the zk less assignment for 1.x, otherwise it is not possible to do a rolling upgrade.
049 */
@InterfaceAudience.Private
public class AssignRegionHandler extends EventHandler {

  private static final Logger LOG = LoggerFactory.getLogger(AssignRegionHandler.class);

  /** The region this handler is trying to open. */
  private final RegionInfo regionInfo;

  /** Id of the master-side open-region procedure; echoed back in state transition reports. */
  private final long openProcId;

  /**
   * Table descriptor supplied by the caller; may be {@code null}, in which case it is loaded
   * from this server's table descriptors inside {@link #process()}.
   */
  private final TableDescriptor tableDesc;

  /** Master system time passed along with the open request; included in transition reports. */
  private final long masterSystemTime;

  /** Provides the backoff used when the open must be re-queued because the region is closing. */
  private final RetryCounter retryCounter;

  /**
   * @param server           the region server that will host the region
   * @param regionInfo       the region to open
   * @param openProcId       id of the master procedure driving this open
   * @param tableDesc        table descriptor, or {@code null} to load it in {@link #process()}
   * @param masterSystemTime master timestamp to report back on state transition
   * @param eventType        executor event type; see {@link #create} for how it is chosen
   */
  public AssignRegionHandler(HRegionServer server, RegionInfo regionInfo, long openProcId,
      @Nullable TableDescriptor tableDesc, long masterSystemTime, EventType eventType) {
    super(server, eventType);
    this.regionInfo = regionInfo;
    this.openProcId = openProcId;
    this.tableDesc = tableDesc;
    this.masterSystemTime = masterSystemTime;
    this.retryCounter = HandlerUtil.getRetryCounter();
  }

  /** Returns the {@code server} field inherited from {@link EventHandler}, cast to its real type. */
  private HRegionServer getServer() {
    return (HRegionServer) server;
  }

  /**
   * Clears this region's regions-in-transition marker and reports FAILED_OPEN to the master.
   * @throws IOException if the transition report itself cannot be delivered to the master
   */
  private void cleanUpAndReportFailure(IOException error) throws IOException {
    LOG.warn("Failed to open region {}, will report to master", regionInfo.getRegionNameAsString(),
      error);
    HRegionServer rs = getServer();
    // Remove the OPENING marker (Boolean.TRUE) we added in process(); two-arg remove is a no-op
    // if the value has changed in the meantime.
    rs.getRegionsInTransitionInRS().remove(regionInfo.getEncodedNameAsBytes(), Boolean.TRUE);
    if (!rs.reportRegionStateTransition(new RegionStateTransitionContext(TransitionCode.FAILED_OPEN,
      HConstants.NO_SEQNUM, openProcId, masterSystemTime, regionInfo))) {
      throw new IOException(
        "Failed to report failed open to master: " + regionInfo.getRegionNameAsString());
    }
  }

  /**
   * Opens the region: fences against concurrent open/close via the regions-in-transition map,
   * opens the HRegion, then runs the post-open deploy tasks and registers the region online.
   * An IOException while opening is reported to the master as FAILED_OPEN; any failure after the
   * region is opened (the point of no return) propagates to {@link #handleException(Throwable)},
   * which aborts the region server.
   */
  @Override
  public void process() throws IOException {
    HRegionServer rs = getServer();
    String encodedName = regionInfo.getEncodedName();
    byte[] encodedNameBytes = regionInfo.getEncodedNameAsBytes();
    String regionName = regionInfo.getRegionNameAsString();
    Region onlineRegion = rs.getRegion(encodedName);
    if (onlineRegion != null) {
      LOG.warn("Received OPEN for {} which is already online", regionName);
      // Just follow the old behavior, do we need to call reportRegionStateTransition? Maybe not?
      // For normal case, it could happen that the rpc call to schedule this handler is succeeded,
      // but before returning to master the connection is broken. And when master tries again, we
      // have already finished the opening. For this case we do not need to call
      // reportRegionStateTransition any more.
      return;
    }
    // Fence: TRUE in this map means "opening", FALSE means "closing". putIfAbsent atomically
    // claims the region for this open attempt.
    Boolean previous = rs.getRegionsInTransitionInRS().putIfAbsent(encodedNameBytes, Boolean.TRUE);
    if (previous != null) {
      if (previous) {
        // The region is opening and this maybe a retry on the rpc call, it is safe to ignore it.
        LOG.info("Receiving OPEN for {} which we are already trying to OPEN" +
          " - ignoring this new request for this region.", regionName);
      } else {
        // The region is closing. This is possible as we will update the region state to CLOSED when
        // calling reportRegionStateTransition, so the HMaster will think the region is offline,
        // before we actually close the region, as reportRegionStateTransition is part of the
        // closing process.
        long backoff = retryCounter.getBackoffTimeAndIncrementAttempts();
        LOG.info(
          "Receiving OPEN for {} which we are trying to close, try again after {}ms",
          regionName, backoff);
        // Re-queue this same handler with backoff instead of failing the open.
        rs.getExecutorService().delayedSubmit(this, backoff, TimeUnit.MILLISECONDS);
      }
      return;
    }
    LOG.info("Open {}", regionName);
    HRegion region;
    try {
      TableDescriptor htd =
        tableDesc != null ? tableDesc : rs.getTableDescriptors().get(regionInfo.getTable());
      if (htd == null) {
        throw new IOException("Missing table descriptor for " + regionName);
      }
      // pass null for the last parameter, which used to be a CancelableProgressable, as now the
      // opening can not be interrupted by a close request any more.
      Configuration conf = rs.getConfiguration();
      TableName tn = htd.getTableName();
      if (ServerRegionReplicaUtil.isMetaRegionReplicaReplicationEnabled(conf, tn)) {
        if (RegionReplicaUtil.isDefaultReplica(this.regionInfo.getReplicaId())) {
          // Add the hbase:meta replication source on replica zero/default.
          rs.getReplicationSourceService().getReplicationManager().
            addCatalogReplicationSource(this.regionInfo);
        }
      }
      region = HRegion.openHRegion(regionInfo, htd, rs.getWAL(regionInfo), conf, rs, null);
    } catch (IOException e) {
      // Opening failed before the point of no return: tell the master so it can retry elsewhere.
      cleanUpAndReportFailure(e);
      return;
    }
    // From here on out, this is PONR. We can not revert back. The only way to address an
    // exception from here on out is to abort the region server.
    rs.postOpenDeployTasks(new PostOpenDeployContext(region, openProcId, masterSystemTime));
    rs.addRegion(region);
    LOG.info("Opened {}", regionName);
    // Cache the open region procedure id after report region transition succeed.
    rs.finishRegionProcedure(openProcId);
    Boolean current = rs.getRegionsInTransitionInRS().remove(regionInfo.getEncodedNameAsBytes());
    if (current == null) {
      // Should NEVER happen, but let's be paranoid.
      LOG.error("Bad state: we've just opened {} which was NOT in transition", regionName);
    } else if (!current) {
      // Should NEVER happen, but let's be paranoid.
      LOG.error("Bad state: we've just opened {} which was closing", regionName);
    }
  }

  /**
   * Called when {@link #process()} throws: clears the regions-in-transition marker and aborts
   * the region server, since a failure after the point of no return cannot be recovered.
   */
  @Override
  protected void handleException(Throwable t) {
    LOG.warn("Fatal error occurred while opening region {}, aborting...",
      regionInfo.getRegionNameAsString(), t);
    // Clear any reference in getServer().getRegionsInTransitionInRS() otherwise can hold up
    // regionserver abort on cluster shutdown. HBASE-23984.
    getServer().getRegionsInTransitionInRS().remove(regionInfo.getEncodedNameAsBytes());
    getServer().abort(
      "Failed to open region " + regionInfo.getRegionNameAsString() + " and can not recover", t);
  }

  /**
   * Factory method that picks the executor event type based on the region: meta regions get
   * M_RS_OPEN_META, system tables and tables at or above ADMIN_QOS priority get
   * M_RS_OPEN_PRIORITY_REGION, everything else gets M_RS_OPEN_REGION.
   */
  public static AssignRegionHandler create(HRegionServer server, RegionInfo regionInfo,
      long openProcId, TableDescriptor tableDesc, long masterSystemTime) {
    EventType eventType;
    if (regionInfo.isMetaRegion()) {
      eventType = EventType.M_RS_OPEN_META;
    } else if (regionInfo.getTable().isSystemTable() ||
      (tableDesc != null && tableDesc.getPriority() >= HConstants.ADMIN_QOS)) {
      eventType = EventType.M_RS_OPEN_PRIORITY_REGION;
    } else {
      eventType = EventType.M_RS_OPEN_REGION;
    }
    return new AssignRegionHandler(server, regionInfo, openProcId, tableDesc, masterSystemTime,
      eventType);
  }
}