/**
 *
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.regionserver;

import java.io.IOException;
import java.io.InterruptedIOException;
import java.util.ArrayList;
import java.util.List;
import java.util.ListIterator;
import java.util.Map;
import java.util.concurrent.Callable;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;
import java.util.concurrent.ThreadFactory;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.Server;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.client.Mutation;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionStateTransition.TransitionCode;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.CancelableProgressable;
import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
import org.apache.hadoop.hbase.util.HasThread;
import org.apache.hadoop.hbase.util.PairOfSameType;
import org.apache.zookeeper.KeeperException;

import com.google.common.util.concurrent.ThreadFactoryBuilder;

/**
 * Executes a region split as a "transaction".  Call {@link #prepare()} to set up
 * the transaction, {@link #execute(Server, RegionServerServices)} to run the
 * transaction and {@link #rollback(Server, RegionServerServices)} to clean up if execute fails.
 *
 * <p>Here is an example of how you would use this class:
 * <pre>
 *  SplitTransaction st = new SplitTransaction(parent, midKey);
 *  if (!st.prepare()) return;
 *  try {
 *    st.execute(server, services);
 *  } catch (IOException ioe) {
 *    try {
 *      st.rollback(server, services);
 *      return;
 *    } catch (RuntimeException e) {
 *      myAbortable.abort("Failed split, abort");
 *    }
 *  }
 * </pre>
 * <p>This class is not thread safe.  The caller must ensure that the split is
 * run by one thread only.
 */
@InterfaceAudience.Private
public class SplitTransaction {
  private static final Log LOG = LogFactory.getLog(SplitTransaction.class);

  /*
   * Region to split
   */
  private final HRegion parent;
  private HRegionInfo hri_a;
  private HRegionInfo hri_b;
  private long fileSplitTimeout = 30000;

  /*
   * Row to split around
   */
  private final byte [] splitrow;

  /**
   * Types to add to the transaction journal.
   * Each enum is a step in the split transaction. Used to figure out how much
   * we need to roll back.
   */
  enum JournalEntry {
    /**
     * Set region as in transition, set it into SPLITTING state.
     */
    SET_SPLITTING,
    /**
     * We created the temporary split data directory.
     */
    CREATE_SPLIT_DIR,
    /**
     * Closed the parent region.
     */
    CLOSED_PARENT_REGION,
    /**
     * The parent has been taken out of the server's online regions list.
     */
    OFFLINED_PARENT,
    /**
     * Started creation of the first daughter region.
     */
    STARTED_REGION_A_CREATION,
    /**
     * Started creation of the second daughter region.
     */
    STARTED_REGION_B_CREATION,
    /**
     * Point of no return.
     * If we got here, then the transaction is not recoverable other than by
     * crashing out the regionserver.
     */
    PONR
  }

  /*
   * Journal of how far the split transaction has progressed.
   */
  private final List<JournalEntry> journal = new ArrayList<JournalEntry>();

  /**
   * Constructor
   * @param r Region to split
   * @param splitrow Row to split around
   */
  public SplitTransaction(final HRegion r, final byte [] splitrow) {
    this.parent = r;
    this.splitrow = splitrow;
  }

  /**
   * Does checks on split inputs.
   * @return <code>true</code> if the region is splittable else
   * <code>false</code> if it is not (e.g. it is already closed, etc.).
   */
  public boolean prepare() {
    if (!this.parent.isSplittable()) return false;
    // Split key can be null if this region is unsplittable; i.e. has refs.
    if (this.splitrow == null) return false;
    HRegionInfo hri = this.parent.getRegionInfo();
    parent.prepareToSplit();
    // Check splitrow.
    byte [] startKey = hri.getStartKey();
    byte [] endKey = hri.getEndKey();
    if (Bytes.equals(startKey, splitrow) ||
        !this.parent.getRegionInfo().containsRow(splitrow)) {
      LOG.info("Split row is not inside region key range or is equal to " +
          "startkey: " + Bytes.toStringBinary(this.splitrow));
      return false;
    }
    long rid = getDaughterRegionIdTimestamp(hri);
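    // The daughters split the parent's key range at splitrow: daughter A covers
    // [startKey, splitrow) and daughter B covers [splitrow, endKey).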
    this.hri_a = new HRegionInfo(hri.getTable(), startKey, this.splitrow, false, rid);
    this.hri_b = new HRegionInfo(hri.getTable(), this.splitrow, endKey, false, rid);
    return true;
  }

  /**
   * Calculate the daughter regionid to use.
   * @param hri Parent {@link HRegionInfo}
   * @return Daughter region id (timestamp) to use.
   */
  private static long getDaughterRegionIdTimestamp(final HRegionInfo hri) {
    long rid = EnvironmentEdgeManager.currentTime();
    // Regionid is a timestamp.  It can't be less than that of the parent, else we will
    // insert at the wrong location in hbase:meta (See HBASE-710).
    if (rid < hri.getRegionId()) {
      LOG.warn("Clock skew; parent region's id is " + hri.getRegionId() +
        " but current time here is " + rid);
      rid = hri.getRegionId() + 1;
    }
    return rid;
  }

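  // Pre-allocated sentinel exception, compared by reference in stepsBeforePONR() to
  // detect that another thread closed the parent first, in which case we did not close
  // the region ourselves and must not journal CLOSED_PARENT_REGION.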
  private static IOException closedByOtherException = new IOException(
      "Failed to close region: already closed by another thread");

  /**
   * Prepare the regions and region files.
   * @param server Hosting server instance.  Can be null when testing (we won't try
   * to update zk if the server is null)
   * @param services Used to online/offline regions.
   * @throws IOException If thrown, the transaction failed.
   *    Call {@link #rollback(Server, RegionServerServices)}
   * @return Regions created
   */
  /* package */PairOfSameType<HRegion> createDaughters(final Server server,
      final RegionServerServices services) throws IOException {
    LOG.info("Starting split of region " + this.parent);
    if ((server != null && server.isStopped()) ||
        (services != null && services.isStopping())) {
      throw new IOException("Server is stopped or stopping");
    }
    assert !this.parent.lock.writeLock().isHeldByCurrentThread():
      "Unsafe to hold write lock while performing RPCs";

    // Coprocessor callback
    if (this.parent.getCoprocessorHost() != null) {
      this.parent.getCoprocessorHost().preSplit();
    }

    // Coprocessor callback
    if (this.parent.getCoprocessorHost() != null) {
      this.parent.getCoprocessorHost().preSplit(this.splitrow);
    }

    // If true, no cluster to write meta edits to or to update znodes in.
    boolean testing = server == null? true:
        server.getConfiguration().getBoolean("hbase.testing.nocluster", false);
    this.fileSplitTimeout = testing ? this.fileSplitTimeout :
        server.getConfiguration().getLong("hbase.regionserver.fileSplitTimeout",
          this.fileSplitTimeout);

    PairOfSameType<HRegion> daughterRegions = stepsBeforePONR(server, services, testing);

    List<Mutation> metaEntries = new ArrayList<Mutation>();
    if (this.parent.getCoprocessorHost() != null) {
      if (this.parent.getCoprocessorHost().
          preSplitBeforePONR(this.splitrow, metaEntries)) {
        throw new IOException("Coprocessor bypassing region "
            + this.parent.getRegionNameAsString() + " split.");
      }
      try {
        for (Mutation p : metaEntries) {
          HRegionInfo.parseRegionName(p.getRow());
        }
      } catch (IOException e) {
        LOG.error("Row key of mutation from coprocessor is not parsable as a region name. "
            + "Mutations from coprocessors should only be for the hbase:meta table.");
        throw e;
      }
    }

    // This is the point of no return.  Adding subsequent edits to .META., as we
    // do below when we open the daughters and add each to .META., can fail in
    // various interesting ways, the most interesting of which is a timeout
    // BUT the edits all go through (See HBASE-3872).  If we reach the PONR
    // then subsequent failures need to crash out this regionserver; the
    // server shutdown processing should be able to fix up the incomplete split.
    // The offlined parent will have the daughters as extra columns.  If
    // we leave the daughter regions in place and do not remove them when we
    // crash out, then they will still have their references to the parent in
    // place and the server shutdown fixup of .META. will point to these
    // regions.
    // We should add the PONR JournalEntry before offlineParentInMeta, so that even if
    // offlineParentInMeta times out, the regionserver will exit and the master's
    // ServerShutdownHandler will fix up the daughters and avoid data loss. (See
    // HBASE-4562).
    this.journal.add(JournalEntry.PONR);

    // Edit the parent in meta.  Offlines the parent region and adds splita and splitb
    // as an atomic update. See HBASE-7721. This update to META is what determines
    // whether the region is considered split or not in case of failures.
    // If it is successful, the master will roll forward; if not, the master will roll back
    // and assign the parent region.
    if (services != null && !services.reportRegionStateTransition(TransitionCode.SPLIT_PONR,
        parent.getRegionInfo(), hri_a, hri_b)) {
      // Passed PONR, let SSH clean it up
      throw new IOException("Failed to notify master that split passed PONR: "
        + parent.getRegionInfo().getRegionNameAsString());
    }
    return daughterRegions;
  }

  public PairOfSameType<HRegion> stepsBeforePONR(final Server server,
      final RegionServerServices services, boolean testing) throws IOException {
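    // Ask the master whether we may split; this is the step recorded as SET_SPLITTING
    // in the journal just below.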
    if (services != null && !services.reportRegionStateTransition(TransitionCode.READY_TO_SPLIT,
        parent.getRegionInfo(), hri_a, hri_b)) {
      throw new IOException("Failed to get ok from master to split "
        + parent.getRegionNameAsString());
    }
    this.journal.add(JournalEntry.SET_SPLITTING);

    this.parent.getRegionFileSystem().createSplitsDir();
    this.journal.add(JournalEntry.CREATE_SPLIT_DIR);

    Map<byte[], List<StoreFile>> hstoreFilesToSplit = null;
    Exception exceptionToThrow = null;
    try {
      hstoreFilesToSplit = this.parent.close(false);
    } catch (Exception e) {
      exceptionToThrow = e;
    }
    if (exceptionToThrow == null && hstoreFilesToSplit == null) {
      // The region was closed by a concurrent thread.  We can't continue
      // with the split; instead we must just abandon it.  If we
      // reopen or split this could cause problems because the region has
      // probably already been moved to a different server, or is in the
      // process of moving to a different server.
      exceptionToThrow = closedByOtherException;
    }
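    // Only journal CLOSED_PARENT_REGION if we closed the parent ourselves; if another
    // thread closed it, there is nothing for rollback to reopen.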
    if (exceptionToThrow != closedByOtherException) {
      this.journal.add(JournalEntry.CLOSED_PARENT_REGION);
    }
    if (exceptionToThrow != null) {
      if (exceptionToThrow instanceof IOException) throw (IOException)exceptionToThrow;
      throw new IOException(exceptionToThrow);
    }
    if (!testing) {
      services.removeFromOnlineRegions(this.parent, null);
    }
    this.journal.add(JournalEntry.OFFLINED_PARENT);

    // TODO: If splitStoreFiles were multithreaded would we complete steps in
    // less elapsed time?  St.Ack 20100920
    //
    // splitStoreFiles creates daughter region dirs under the parent splits dir.
    // Nothing to unroll here on failure -- the cleanup of CREATE_SPLIT_DIR will
    // clean this up.
    splitStoreFiles(hstoreFilesToSplit);

    // Log to the journal that we are creating region A, the first daughter
    // region.  We could fail halfway through.  If we do, we could have left
    // stuff in the fs that needs cleanup -- a storefile or two.  That's why we
    // add the entry to the journal BEFORE rather than AFTER the change.
    this.journal.add(JournalEntry.STARTED_REGION_A_CREATION);
    HRegion a = this.parent.createDaughterRegionFromSplits(this.hri_a);

    // Ditto
    this.journal.add(JournalEntry.STARTED_REGION_B_CREATION);
    HRegion b = this.parent.createDaughterRegionFromSplits(this.hri_b);
    return new PairOfSameType<HRegion>(a, b);
  }

  /**
   * Perform the time consuming opening of the daughter regions.
   * @param server Hosting server instance.  Can be null when testing
   * @param services Used to online/offline regions.
   * @param a first daughter region
   * @param b second daughter region
   * @throws IOException If thrown, the transaction failed.
   *          Call {@link #rollback(Server, RegionServerServices)}
   */
  /* package */void openDaughters(final Server server,
      final RegionServerServices services, HRegion a, HRegion b)
      throws IOException {
    boolean stopped = server != null && server.isStopped();
    boolean stopping = services != null && services.isStopping();
    // TODO: Is this check needed here?
    if (stopped || stopping) {
      LOG.info("Not opening daughters " +
          b.getRegionInfo().getRegionNameAsString() +
          " and " +
          a.getRegionInfo().getRegionNameAsString() +
          " because stopping=" + stopping + ", stopped=" + stopped);
    } else {
      // Open daughters in parallel.
      DaughterOpener aOpener = new DaughterOpener(server, a);
      DaughterOpener bOpener = new DaughterOpener(server, b);
      aOpener.start();
      bOpener.start();
      try {
        aOpener.join();
        bOpener.join();
      } catch (InterruptedException e) {
        throw (InterruptedIOException)new InterruptedIOException().initCause(e);
      }
      if (aOpener.getException() != null) {
        throw new IOException("Failed " +
          aOpener.getName(), aOpener.getException());
      }
      if (bOpener.getException() != null) {
        throw new IOException("Failed " +
          bOpener.getName(), bOpener.getException());
      }
      if (services != null) {
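        // Report to the master that the daughters are open (TransitionCode.SPLIT)
        // before adding them to this server's online regions list below.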
        if (!services.reportRegionStateTransition(TransitionCode.SPLIT,
            parent.getRegionInfo(), hri_a, hri_b)) {
          throw new IOException("Failed to report split region to master: "
            + parent.getRegionInfo().getShortNameToLog());
        }
        // Should add it to OnlineRegions
        services.addToOnlineRegions(b);
        services.addToOnlineRegions(a);
      }
    }
  }

  /**
   * Run the transaction.
   * @param server Hosting server instance.  Can be null when testing
   * @param services Used to online/offline regions.
   * @throws IOException If thrown, the transaction failed.
   *          Call {@link #rollback(Server, RegionServerServices)}
   * @return Regions created
   * @see #rollback(Server, RegionServerServices)
   */
  public PairOfSameType<HRegion> execute(final Server server,
      final RegionServerServices services)
  throws IOException {
    PairOfSameType<HRegion> regions = createDaughters(server, services);
    if (this.parent.getCoprocessorHost() != null) {
      this.parent.getCoprocessorHost().preSplitAfterPONR();
    }
    return stepsAfterPONR(server, services, regions);
  }

  public PairOfSameType<HRegion> stepsAfterPONR(final Server server,
      final RegionServerServices services, PairOfSameType<HRegion> regions)
      throws IOException {
    openDaughters(server, services, regions.getFirst(), regions.getSecond());
    // Coprocessor callback
    if (parent.getCoprocessorHost() != null) {
      parent.getCoprocessorHost().postSplit(regions.getFirst(), regions.getSecond());
    }
    return regions;
  }

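  /**
   * Add the hosting server's name, startcode and the region's open sequence number
   * to the passed catalog (hbase:meta) Put.
   * @param p Put to add the location columns to
   * @param sn Server the region is opening on
   * @param openSeqNum Sequence number at which the region opened
   * @return The passed <code>p</code>, for chaining
   */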
  public Put addLocation(final Put p, final ServerName sn, long openSeqNum) {
    p.addImmutable(HConstants.CATALOG_FAMILY, HConstants.SERVER_QUALIFIER,
      Bytes.toBytes(sn.getHostAndPort()));
    p.addImmutable(HConstants.CATALOG_FAMILY, HConstants.STARTCODE_QUALIFIER,
      Bytes.toBytes(sn.getStartcode()));
    p.addImmutable(HConstants.CATALOG_FAMILY, HConstants.SEQNUM_QUALIFIER,
        Bytes.toBytes(openSeqNum));
    return p;
  }

  /*
   * Opens a daughter region in its own thread.
   * If we fail, abort this hosting server.
   */
  class DaughterOpener extends HasThread {
    private final Server server;
    private final HRegion r;
    private Throwable t = null;

    DaughterOpener(final Server s, final HRegion r) {
      super((s == null? "null-services": s.getServerName()) +
        "-daughterOpener=" + r.getRegionInfo().getEncodedName());
      setDaemon(true);
      this.server = s;
      this.r = r;
    }

    /**
     * @return Null if the open succeeded, else the exception that caused the open to fail.
     * Call this only after the thread exits or you may get a stale view of the result.
     */
    Throwable getException() {
      return this.t;
    }

    @Override
    public void run() {
      try {
        openDaughterRegion(this.server, r);
      } catch (Throwable t) {
        this.t = t;
      }
    }
  }

  /**
   * Open a daughter region.
   * @param server Hosting server instance (may be null when testing)
   * @param daughter The daughter region to open
   * @throws IOException
   * @throws KeeperException
   */
  void openDaughterRegion(final Server server, final HRegion daughter)
  throws IOException, KeeperException {
    HRegionInfo hri = daughter.getRegionInfo();
    LoggingProgressable reporter = server == null ? null
        : new LoggingProgressable(hri, server.getConfiguration().getLong(
            "hbase.regionserver.split.daughter.open.log.interval", 10000));
    daughter.openHRegion(reporter);
  }

  static class LoggingProgressable implements CancelableProgressable {
    private final HRegionInfo hri;
    private long lastLog = -1;
    private final long interval;

    LoggingProgressable(final HRegionInfo hri, final long interval) {
      this.hri = hri;
      this.interval = interval;
    }

    @Override
    public boolean progress() {
      long now = System.currentTimeMillis();
      if (now - lastLog > this.interval) {
        LOG.info("Opening " + this.hri.getRegionNameAsString());
        this.lastLog = now;
      }
      return true;
    }
  }

  private void splitStoreFiles(final Map<byte[], List<StoreFile>> hstoreFilesToSplit)
      throws IOException {
    if (hstoreFilesToSplit == null) {
      // Could be null because close didn't succeed -- for now consider it fatal
      throw new IOException("Close returned empty list of StoreFiles");
    }
    // The following code sets up a thread pool executor with as many slots as
    // there are files to split. It then fires up everything, waits for
    // completion and finally checks for any exception.
    int nbFiles = hstoreFilesToSplit.size();
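    // Note: nbFiles is the number of column families with files to split; each family
    // may contribute several store files, and every store file gets its own
    // StoreFileSplitter task submitted to the pool below.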
    if (nbFiles == 0) {
      // no file needs to be split.
      return;
    }
    ThreadFactoryBuilder builder = new ThreadFactoryBuilder();
    builder.setNameFormat("StoreFileSplitter-%1$d");
    ThreadFactory factory = builder.build();
    ThreadPoolExecutor threadPool =
      (ThreadPoolExecutor) Executors.newFixedThreadPool(nbFiles, factory);
    List<Future<Void>> futures = new ArrayList<Future<Void>>(nbFiles);

    // Split each store file.
    for (Map.Entry<byte[], List<StoreFile>> entry: hstoreFilesToSplit.entrySet()) {
      for (StoreFile sf: entry.getValue()) {
        StoreFileSplitter sfs = new StoreFileSplitter(entry.getKey(), sf);
        futures.add(threadPool.submit(sfs));
      }
    }
    // Shutdown the pool
    threadPool.shutdown();

    // Wait for all the tasks to finish
    try {
      boolean stillRunning = !threadPool.awaitTermination(
          this.fileSplitTimeout, TimeUnit.MILLISECONDS);
      if (stillRunning) {
        threadPool.shutdownNow();
        // wait for the thread pool to shut down completely.
        while (!threadPool.isTerminated()) {
          Thread.sleep(50);
        }
        throw new IOException("Took too long to split the" +
            " files and create the references, aborting split");
      }
    } catch (InterruptedException e) {
      throw (InterruptedIOException)new InterruptedIOException().initCause(e);
    }

    // Look for any exception
    for (Future<Void> future: futures) {
      try {
        future.get();
      } catch (InterruptedException e) {
        throw (InterruptedIOException)new InterruptedIOException().initCause(e);
      } catch (ExecutionException e) {
        throw new IOException(e);
      }
    }
  }

  private void splitStoreFile(final byte[] family, final StoreFile sf) throws IOException {
    HRegionFileSystem fs = this.parent.getRegionFileSystem();
    String familyName = Bytes.toString(family);
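    // Create split references for the bottom half (daughter A, boolean false) and the
    // top half (daughter B, boolean true) of this store file; the daughters initially
    // refer to the parent's data rather than rewriting it.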
    fs.splitStoreFile(this.hri_a, familyName, sf, this.splitrow, false);
    fs.splitStoreFile(this.hri_b, familyName, sf, this.splitrow, true);
  }

  /**
   * Utility class used to do the file splitting / reference writing
   * in parallel instead of sequentially.
   */
  class StoreFileSplitter implements Callable<Void> {
    private final byte[] family;
    private final StoreFile sf;

    /**
     * Constructor that takes what it needs to split
     * @param family Family that contains the store file
     * @param sf which file
     */
    public StoreFileSplitter(final byte[] family, final StoreFile sf) {
      this.sf = sf;
      this.family = family;
    }

    public Void call() throws IOException {
      splitStoreFile(family, sf);
      return null;
    }
  }

  /**
   * @param server Hosting server instance (May be null when testing).
   * @param services Used to online/offline regions.
   * @throws IOException If thrown, rollback failed.  Take drastic action.
   * @return True if we successfully rolled back, false if we got to the point
   * of no return and so now need to abort the server to minimize damage.
   */
  @SuppressWarnings("deprecation")
  public boolean rollback(final Server server, final RegionServerServices services)
  throws IOException {
    // Coprocessor callback
    if (this.parent.getCoprocessorHost() != null) {
      this.parent.getCoprocessorHost().preRollBackSplit();
    }

    boolean result = true;
    ListIterator<JournalEntry> iterator =
      this.journal.listIterator(this.journal.size());
    // Iterate in reverse.
    while (iterator.hasPrevious()) {
      JournalEntry je = iterator.previous();
      switch(je) {

      case SET_SPLITTING:
        if (services != null
            && !services.reportRegionStateTransition(TransitionCode.SPLIT_REVERTED,
                parent.getRegionInfo(), hri_a, hri_b)) {
          return false;
        }
        break;

      case CREATE_SPLIT_DIR:
        this.parent.writestate.writesEnabled = true;
        this.parent.getRegionFileSystem().cleanupSplitsDir();
        break;

      case CLOSED_PARENT_REGION:
        try {
          // So, this returns a seqid, but if we just closed and then reopened, we
          // should be ok. On close, we flushed using a sequenceid obtained from the
          // hosting regionserver, so there is no need to propagate the sequenceid
          // returned out of initialize below up into the regionserver as we normally do.
          // TODO: Verify.
          this.parent.initialize();
        } catch (IOException e) {
          LOG.error("Failed rolling back CLOSED_PARENT_REGION of region " +
            this.parent.getRegionNameAsString(), e);
          throw new RuntimeException(e);
        }
        break;

      case STARTED_REGION_A_CREATION:
        this.parent.getRegionFileSystem().cleanupDaughterRegion(this.hri_a);
        break;

      case STARTED_REGION_B_CREATION:
        this.parent.getRegionFileSystem().cleanupDaughterRegion(this.hri_b);
        break;

      case OFFLINED_PARENT:
        if (services != null) services.addToOnlineRegions(this.parent);
        break;

      case PONR:
        // We got to the point-of-no-return so we need to just abort. Return
        // immediately.  Do not clean up created daughter regions.  They need
        // to be in place so we don't delete the parent region mistakenly.
        // See HBASE-3872.
        return false;

      default:
        throw new RuntimeException("Unhandled journal entry: " + je);
      }
    }
    // Coprocessor callback
    if (this.parent.getCoprocessorHost() != null) {
      this.parent.getCoprocessorHost().postRollBackSplit();
    }
    return result;
  }

  HRegionInfo getFirstDaughter() {
    return hri_a;
  }

  HRegionInfo getSecondDaughter() {
    return hri_b;
  }
}