/**
 *
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.regionserver.handler;

import java.io.IOException;
import java.util.concurrent.atomic.AtomicBoolean;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.Server;
import org.apache.hadoop.hbase.executor.EventHandler;
import org.apache.hadoop.hbase.executor.EventType;
import org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionStateTransition.TransitionCode;
import org.apache.hadoop.hbase.regionserver.HRegion;
import org.apache.hadoop.hbase.regionserver.RegionServerAccounting;
import org.apache.hadoop.hbase.regionserver.RegionServerServices;
import org.apache.hadoop.hbase.util.CancelableProgressable;

/**
 * Handles opening of a region on a region server.
 * <p>
 * This is executed after receiving an OPEN RPC from the master or client.
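 * <p>
 * A rough, illustrative submission path (the surrounding variables and the
 * executor call shown here are assumptions for illustration, not part of this
 * class, and may differ across HBase versions):
 * <pre>
 *   OpenRegionHandler handler =
 *       new OpenRegionHandler(server, rsServices, regionInfo, htd);
 *   executorService.submit(handler);  // process() then runs on a worker thread
 * </pre>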
 */
@InterfaceAudience.Private
public class OpenRegionHandler extends EventHandler {
  private static final Log LOG = LogFactory.getLog(OpenRegionHandler.class);

  protected final RegionServerServices rsServices;

  private final HRegionInfo regionInfo;
  private final HTableDescriptor htd;

  public OpenRegionHandler(final Server server,
      final RegionServerServices rsServices, HRegionInfo regionInfo,
      HTableDescriptor htd) {
    this(server, rsServices, regionInfo, htd, EventType.M_RS_OPEN_REGION);
  }

  protected OpenRegionHandler(final Server server,
      final RegionServerServices rsServices, final HRegionInfo regionInfo,
      final HTableDescriptor htd, EventType eventType) {
    super(server, eventType);
    this.rsServices = rsServices;
    this.regionInfo = regionInfo;
    this.htd = htd;
  }

  public HRegionInfo getRegionInfo() {
    return regionInfo;
  }

  @Override
  public void process() throws IOException {
    boolean openSuccessful = false;
    final String regionName = regionInfo.getRegionNameAsString();
    HRegion region = null;

    try {
      if (this.server.isStopped() || this.rsServices.isStopping()) {
        return;
      }
      final String encodedName = regionInfo.getEncodedName();

      // Two different difficult situations can occur:
      // 1) The opening was cancelled. This is an expected situation.
      // 2) The region is now marked as online while we're supposed to open it. This would be a bug.

      // Check that this region is not already online
      if (this.rsServices.getFromOnlineRegions(encodedName) != null) {
        LOG.error("Region " + encodedName +
            " was already online when we started processing the opening. " +
            "Marking this new attempt as failed");
        return;
      }

      // Check that we're still supposed to open the region.
      // If not, just return.  Someone stole the region from under us.
      if (!isRegionStillOpening()) {
        LOG.error("Region " + encodedName + " opening cancelled");
        return;
      }

      // Open region.  After a successful open, failures in subsequent
      // processing need to do a close as part of cleanup.
      region = openRegion();
      if (region == null) {
        return;
      }

      if (!updateMeta(region) || this.server.isStopped() ||
          this.rsServices.isStopping()) {
        return;
      }

      if (!isRegionStillOpening()) {
        return;
      }

      // Region open succeeded; add it to the online regions map.
      this.rsServices.addToOnlineRegions(region);
      openSuccessful = true;

      // Done!  Successful region open
      LOG.debug("Opened " + regionName + " on " +
        this.server.getServerName());
    } finally {
      // Do all cleanup here
      if (!openSuccessful) {
        doCleanUpOnFailedOpen(region);
      }
      final Boolean current = this.rsServices.getRegionsInTransitionInRS().
          remove(this.regionInfo.getEncodedNameAsBytes());

      // Let's check whether we have hit a race condition on open cancellation...
      // A better solution would be to not have any race condition at all.
      // this.rsServices.getRegionsInTransitionInRS().remove(
      //  this.regionInfo.getEncodedNameAsBytes(), Boolean.TRUE);
      // would help; see the sketch after this method.
      if (openSuccessful) {
        if (current == null) { // Should NEVER happen, but let's be paranoid.
          LOG.error("Bad state: we've just opened a region that was NOT in transition. Region="
              + regionName);
        } else if (Boolean.FALSE.equals(current)) { // Can happen, if we're
                                                    // really unlucky.
          LOG.error("Race condition: we've finished opening a region, while a close was requested"
              + " on region=" + regionName + ". It can be a critical error, as a region that"
              + " should be closed is now opened. Closing it now");
          cleanupFailedOpen(region);
        }
      }
    }
  }
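
  /*
   * Sketch of the race-free alternative hinted at in the finally block above
   * (an illustration of the idea only, not code that runs here):
   * ConcurrentMap's two-argument remove(key, value) removes the entry only if
   * it is still mapped to the expected value, so a concurrent close flipping
   * the flag to Boolean.FALSE would be detected atomically.
   *
   *   boolean stillMarkedOpening = rsServices.getRegionsInTransitionInRS()
   *       .remove(regionInfo.getEncodedNameAsBytes(), Boolean.TRUE);
   *   if (!stillMarkedOpening) {
   *     // A close was requested while we were opening; clean up instead.
   *   }
   */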

  private void doCleanUpOnFailedOpen(HRegion region)
      throws IOException {
    try {
      if (region != null) {
        cleanupFailedOpen(region);
      }
    } finally {
      rsServices.reportRegionStateTransition(TransitionCode.FAILED_OPEN, regionInfo);
    }
  }

  /**
   * Update ZK or META.  This can take a while if, for example, hbase:meta is
   * not available -- if the server hosting hbase:meta crashed and we are
   * waiting on it to come back -- so run in a thread and keep updating znode
   * state in the meantime so the master doesn't time out our region-in-transition.
   * Caller must clean up the region if this fails.
   */
  boolean updateMeta(final HRegion r) {
    if (this.server.isStopped() || this.rsServices.isStopping()) {
      return false;
    }
    // Object we do wait/notify on.  Make it boolean.  If set, we're done.
    // Else, wait.  (A standalone sketch of this handshake follows this method.)
    final AtomicBoolean signaller = new AtomicBoolean(false);
    PostOpenDeployTasksThread t = new PostOpenDeployTasksThread(r,
      this.server, this.rsServices, signaller);
    t.start();
    // Post open deploy task:
    //   meta => update meta location in ZK
    //   other region => update meta
    long now = System.currentTimeMillis();
    long lastUpdate = now;
    boolean tickleOpening = true;
    while (!signaller.get() && t.isAlive() && !this.server.isStopped() &&
        !this.rsServices.isStopping() && isRegionStillOpening()) {
      long elapsed = now - lastUpdate;
      if (elapsed > 120000) { // 2 minutes, no need to tickleOpening too often
        // Only tickle OPENING if postOpenDeployTasks is taking some time.
        lastUpdate = now;
      }
      synchronized (signaller) {
        try {
          // Wait for 10 seconds, so that server shutdown
          // won't take too long if this thread happens to run.
          if (!signaller.get()) signaller.wait(10000);
        } catch (InterruptedException e) {
          // Go to the loop check.
        }
      }
      now = System.currentTimeMillis();
    }
    // Is the thread still alive?  We may have left the above loop because the
    // server is stopping or we timed out the edit.  If so, interrupt it.
    if (t.isAlive()) {
      if (!signaller.get()) {
        // Thread still running; interrupt
        LOG.debug("Interrupting thread " + t);
        t.interrupt();
      }
      try {
        t.join();
      } catch (InterruptedException ie) {
        LOG.warn("Interrupted joining " +
          r.getRegionInfo().getRegionNameAsString(), ie);
        Thread.currentThread().interrupt();
      }
    }

    // Was there an exception opening the region?  This should trigger on
    // InterruptedException too.  If so, we failed.  A failed tickle of the
    // OPENING state also counts as a failure.
    return ((!Thread.interrupted() && t.getException() == null) && tickleOpening);
  }
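
  /*
   * Minimal, self-contained sketch of the signalling pattern used above
   * (illustration only; the names are hypothetical). The worker sets the flag
   * before notifying, and the waiter re-checks the flag while holding the
   * monitor, which avoids a lost wakeup between the check and the wait:
   *
   *   final AtomicBoolean done = new AtomicBoolean(false);
   *   Thread worker = new Thread(new Runnable() {
   *     public void run() {
   *       // ... do the slow work ...
   *       done.set(true);
   *       synchronized (done) { done.notify(); }
   *     }
   *   });
   *   worker.start();
   *   synchronized (done) {
   *     while (!done.get()) {
   *       done.wait(10000);  // bounded wait; may throw InterruptedException
   *     }
   *   }
   */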

  /**
   * Thread to run region post open tasks. Call {@link #getException()} after
   * the thread finishes to check for exceptions running
   * {@link RegionServerServices#postOpenDeployTasks(HRegion)}.
   */
  static class PostOpenDeployTasksThread extends Thread {
    private Throwable exception = null;
    private final Server server;
    private final RegionServerServices services;
    private final HRegion region;
    private final AtomicBoolean signaller;

    PostOpenDeployTasksThread(final HRegion region, final Server server,
        final RegionServerServices services, final AtomicBoolean signaller) {
      super("PostOpenDeployTasks:" + region.getRegionInfo().getEncodedName());
      this.setDaemon(true);
      this.server = server;
      this.services = services;
      this.region = region;
      this.signaller = signaller;
    }

    @Override
    public void run() {
      try {
        this.services.postOpenDeployTasks(this.region);
      } catch (IOException e) {
        server.abort("Exception running postOpenDeployTasks; region=" +
            this.region.getRegionInfo().getEncodedName(), e);
      } catch (Throwable e) {
        LOG.warn("Exception running postOpenDeployTasks; region=" +
          this.region.getRegionInfo().getEncodedName(), e);
        this.exception = e;
      }
      // We're done.  Set flag then wake up anyone waiting on thread to complete.
      this.signaller.set(true);
      synchronized (this.signaller) {
        this.signaller.notify();
      }
    }

    /**
     * @return Null or the run exception; call this method after the thread is done.
     */
    Throwable getException() {
      return this.exception;
    }
  }

  /**
   * @return Instance of HRegion if the open succeeded, else null.
   */
  HRegion openRegion() {
    HRegion region = null;
    try {
      // Instantiate the region.  This also periodically tickles OPENING
      // state so the master doesn't time out this region in transition.
      region = HRegion.openHRegion(this.regionInfo, this.htd,
        this.rsServices.getWAL(this.regionInfo),
        this.server.getConfiguration(),
        this.rsServices,
        new CancelableProgressable() {
          @Override
          public boolean progress() {
            if (!isRegionStillOpening()) {
              LOG.warn("Open region aborted since it isn't opening any more");
              return false;
            }
            return true;
          }
        });
    } catch (Throwable t) {
      // We failed open. Our caller will see the 'null' return value
      // and transition the node back to FAILED_OPEN. If that fails,
      // we rely on the Timeout Monitor in the master to reassign.
      LOG.error(
          "Failed open of region=" + this.regionInfo.getRegionNameAsString()
              + ", starting to roll back the global memstore size.", t);
      // Decrease the global memstore size.
      if (this.rsServices != null) {
        RegionServerAccounting rsAccounting =
          this.rsServices.getRegionServerAccounting();
        if (rsAccounting != null) {
          rsAccounting.rollbackRegionReplayEditsSize(this.regionInfo.getRegionName());
        }
      }
    }
    return region;
  }

  void cleanupFailedOpen(final HRegion region) throws IOException {
    if (region != null) {
      this.rsServices.removeFromOnlineRegions(region, null);
      region.close();
    }
  }

  private boolean isRegionStillOpening() {
    byte[] encodedName = regionInfo.getEncodedNameAsBytes();
    Boolean action = rsServices.getRegionsInTransitionInRS().get(encodedName);
    return Boolean.TRUE.equals(action); // Boolean.TRUE in the RIT map means the region is opening
  }
}