1   /**
2    *
3    * Licensed to the Apache Software Foundation (ASF) under one
4    * or more contributor license agreements.  See the NOTICE file
5    * distributed with this work for additional information
6    * regarding copyright ownership.  The ASF licenses this file
7    * to you under the Apache License, Version 2.0 (the
8    * "License"); you may not use this file except in compliance
9    * with the License.  You may obtain a copy of the License at
10   *
11   *     http://www.apache.org/licenses/LICENSE-2.0
12   *
13   * Unless required by applicable law or agreed to in writing, software
14   * distributed under the License is distributed on an "AS IS" BASIS,
15   * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
16   * See the License for the specific language governing permissions and
17   * limitations under the License.
18   */
19  package org.apache.hadoop.hbase.regionserver;
20  
21  import java.io.IOException;
22  import java.io.InterruptedIOException;
23  import java.net.ConnectException;
24  import java.net.SocketTimeoutException;
25  import java.util.ArrayList;
26  import java.util.List;
27  import java.util.Map;
28  import java.util.concurrent.atomic.AtomicInteger;
29  
30  import org.apache.commons.lang.math.RandomUtils;
31  import org.apache.commons.lang.mutable.MutableInt;
32  import org.apache.commons.logging.Log;
33  import org.apache.commons.logging.LogFactory;
34  import org.apache.hadoop.classification.InterfaceAudience;
35  import org.apache.hadoop.conf.Configuration;
36  import org.apache.hadoop.fs.FileSystem;
37  import org.apache.hadoop.fs.Path;
38  import org.apache.hadoop.hbase.HConstants;
39  import org.apache.hadoop.hbase.NotServingRegionException;
40  import org.apache.hadoop.hbase.ServerName;
41  import org.apache.hadoop.hbase.SplitLogCounters;
42  import org.apache.hadoop.hbase.SplitLogTask;
43  import org.apache.hadoop.hbase.client.HConnectionManager;
44  import org.apache.hadoop.hbase.client.RetriesExhaustedException;
45  import org.apache.hadoop.hbase.exceptions.DeserializationException;
46  import org.apache.hadoop.hbase.executor.ExecutorService;
47  import org.apache.hadoop.hbase.master.SplitLogManager;
48  import org.apache.hadoop.hbase.regionserver.handler.HLogSplitterHandler;
49  import org.apache.hadoop.hbase.regionserver.wal.HLogSplitter;
50  import org.apache.hadoop.hbase.regionserver.wal.HLogUtil;
51  import org.apache.hadoop.hbase.util.CancelableProgressable;
52  import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
53  import org.apache.hadoop.hbase.util.ExceptionUtil;
54  import org.apache.hadoop.hbase.util.FSUtils;
55  import org.apache.hadoop.hbase.zookeeper.ZKSplitLog;
56  import org.apache.hadoop.hbase.zookeeper.ZKUtil;
57  import org.apache.hadoop.hbase.zookeeper.ZooKeeperListener;
58  import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
59  import org.apache.hadoop.util.StringUtils;
60  import org.apache.zookeeper.AsyncCallback;
61  import org.apache.zookeeper.KeeperException;
62  import org.apache.zookeeper.data.Stat;
63  
64  /**
65   * This worker is spawned in every regionserver (should we also spawn one in
66   * the master?). The Worker waits for log splitting tasks to be put up by the
67   * {@link SplitLogManager} running in the master and races with other workers
68   * in other servers to acquire those tasks. The coordination is done via
69   * ZooKeeper. All the action takes place at the /hbase/splitlog znode.
70   * <p>
71   * If a worker has successfully moved the task from state UNASSIGNED to
72   * OWNED then it owns the task. It keeps heartbeating the manager by
73   * periodically moving the task from OWNED to OWNED state. On success it
74   * moves the task to DONE. On unrecoverable error it moves the task state to
75   * ERR. If it cannot continue but wants the master to retry the task then it
76   * moves the task state to RESIGNED.
77   * <p>
78   * The manager can take a task away from a worker by moving the task from
79   * OWNED to UNASSIGNED. In the absence of a global lock there is an
80   * unavoidable race here - a worker might have just finished its task when it
81   * is stripped of its ownership. Here we rely on the idempotency of the log
82   * splitting task for correctness.
83   */
84  @InterfaceAudience.Private
85  public class SplitLogWorker extends ZooKeeperListener implements Runnable {
86    public static final int DEFAULT_MAX_SPLITTERS = 2;
87  
88    private static final Log LOG = LogFactory.getLog(SplitLogWorker.class);
89    private static final int checkInterval = 5000; // 5 seconds
90    private static final int FAILED_TO_OWN_TASK = -1;
91  
92    Thread worker;
93    private final ServerName serverName;
94    private final TaskExecutor splitTaskExecutor;
95    // thread pool which executes recovery work
96    private final ExecutorService executorService;
97  
98    private final Object taskReadyLock = new Object();
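      // Incremented under taskReadyLock by nodeChildrenChanged() whenever the children of the
      // splitlog znode change; taskLoop() waits on taskReadyLock until this advances.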
99    volatile int taskReadySeq = 0;
100   private volatile String currentTask = null;
101   private int currentVersion;
102   private volatile boolean exitWorker;
103   private final Object grabTaskLock = new Object();
104   private boolean workerInGrabTask = false;
105   private final int report_period;
106   private RegionServerServices server = null;
107   private Configuration conf = null;
108   protected final AtomicInteger tasksInProgress = new AtomicInteger(0);
109   private int maxConcurrentTasks = 0;
110 
111   public SplitLogWorker(ZooKeeperWatcher watcher, Configuration conf, RegionServerServices server,
112       TaskExecutor splitTaskExecutor) {
113     super(watcher);
114     this.server = server;
115     this.serverName = server.getServerName();
116     this.splitTaskExecutor = splitTaskExecutor;
117     report_period = conf.getInt("hbase.splitlog.report.period",
118       conf.getInt("hbase.splitlog.manager.timeout", SplitLogManager.DEFAULT_TIMEOUT) / 3);
119     this.conf = conf;
120     this.executorService = this.server.getExecutorService();
121     this.maxConcurrentTasks =
122         conf.getInt("hbase.regionserver.wal.max.splitters", DEFAULT_MAX_SPLITTERS);
123   }
124 
125   public SplitLogWorker(final ZooKeeperWatcher watcher, final Configuration conf,
126       RegionServerServices server, final LastSequenceId sequenceIdChecker) {
127     this(watcher, conf, server, new TaskExecutor() {
128       @Override
129       public Status exec(String filename, CancelableProgressable p) {
130         Path rootdir;
131         FileSystem fs;
132         try {
133           rootdir = FSUtils.getRootDir(conf);
134           fs = rootdir.getFileSystem(conf);
135         } catch (IOException e) {
136           LOG.warn("could not find root dir or fs", e);
137           return Status.RESIGNED;
138         }
139         // TODO have to correctly figure out when log splitting has been
140         // interrupted or has encountered a transient error and when it has
141         // encountered a bad non-retry-able persistent error.
142         try {
143           if (!HLogSplitter.splitLogFile(rootdir, fs.getFileStatus(new Path(rootdir, filename)),
144             fs, conf, p, sequenceIdChecker, watcher)) {
145             return Status.PREEMPTED;
146           }
147         } catch (InterruptedIOException iioe) {
148           LOG.warn("log splitting of " + filename + " interrupted, resigning", iioe);
149           return Status.RESIGNED;
150         } catch (IOException e) {
151           Throwable cause = e.getCause();
152           if (e instanceof RetriesExhaustedException && (cause instanceof NotServingRegionException 
153                   || cause instanceof ConnectException 
154                   || cause instanceof SocketTimeoutException)) {
155             LOG.warn("log replaying of " + filename + " can't connect to the target regionserver, "
156                 + "resigning", e);
157             return Status.RESIGNED;
158           } else if (cause instanceof InterruptedException) {
159             LOG.warn("log splitting of " + filename + " interrupted, resigning", e);
160             return Status.RESIGNED;
161           } else if (cause instanceof KeeperException) {
162             LOG.warn("log splitting of " + filename + " hit ZooKeeper issue, resigning", e);
163             return Status.RESIGNED;
164           }
165           LOG.warn("log splitting of " + filename + " failed, returning error", e);
166           return Status.ERR;
167         }
168         return Status.DONE;
169       }
170     });
171   }
172 
173   @Override
174   public void run() {
175     try {
176       LOG.info("SplitLogWorker " + this.serverName + " starting");
177       this.watcher.registerListener(this);
178       boolean distributedLogReplay = HLogSplitter.isDistributedLogReplay(conf);
179       if (distributedLogReplay) {
180         // initialize a new connection for splitlogworker configuration
181         HConnectionManager.getConnection(conf);
182       }
183 
184       // wait for master to create the splitLogZnode
185       int res = -1;
186       while (res == -1 && !exitWorker) {
187         try {
188           res = ZKUtil.checkExists(watcher, watcher.splitLogZNode);
189         } catch (KeeperException e) {
190           // ignore
191           LOG.warn("Exception when checking for " + watcher.splitLogZNode  + " ... retrying", e);
192         }
193         if (res == -1) {
194           LOG.info(watcher.splitLogZNode + " znode does not exist, waiting for master to create");
195           Thread.sleep(1000);
196         }
197       }
198 
199       if (!exitWorker) {
200         taskLoop();
201       }
202     } catch (Throwable t) {
203       if (ExceptionUtil.isInterrupt(t)) {
204         LOG.info("SplitLogWorker interrupted. Exiting. " + (exitWorker ? "" :
205             " (ERROR: exitWorker is not set, exiting anyway)"));
206       } else {
207         // only a logical error can get us here. Printing it out
208         // to make debugging easier
209         LOG.error("unexpected error ", t);
210       }
211     } finally {
212       LOG.info("SplitLogWorker " + this.serverName + " exiting");
213     }
214   }
215 
216   /**
217    * Wait for tasks to become available at the /hbase/splitlog znode. Grab tasks
218    * one at a time. This policy puts an upper limit on the number of
219    * simultaneous log splitting tasks that could be happening in a cluster.
220    * <p>
221    * Synchronization using {@link #taskReadyLock} ensures that it will
222    * try to grab every task that has been put up.
223    */
224   private void taskLoop() throws InterruptedException {
225     while (!exitWorker) {
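          // Snapshot the ready-sequence before listing tasks; if nodeChildrenChanged() fires
          // while we are busy grabbing tasks, the wait at the bottom of the loop will not block
          // and we immediately rescan.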
226       int seq_start = taskReadySeq;
227       List<String> paths = getTaskList();
228       if (paths == null) {
229         LOG.warn("Could not get tasks, did someone remove " +
230             this.watcher.splitLogZNode + " ... worker thread exiting.");
231         return;
232       }
233       // pick the meta wal first if one is present, otherwise start at a random offset
234       int offset = (int) (Math.random() * paths.size());
235       for (int i = 0; i < paths.size(); i++) {
236         if (HLogUtil.isMetaFile(paths.get(i))) {
237           offset = i;
238           break;
239         }
240       }
241       int numTasks = paths.size();
242       for (int i = 0; i < numTasks; i++) {
243         int idx = (i + offset) % paths.size();
244         // don't call ZKSplitLog.getNodeName() because that will lead to
245         // double encoding of the path name
246         if (this.calculateAvailableSplitters(numTasks) > 0) {
247           grabTask(ZKUtil.joinZNode(watcher.splitLogZNode, paths.get(idx)));
248         } else {
249           LOG.debug("Current region server " + this.serverName + " has "
250               + this.tasksInProgress.get() + " tasks in progress and can't take more.");
251           break;
252         }
253         if (exitWorker) {
254           return;
255         }
256       }
257       SplitLogCounters.tot_wkr_task_grabing.incrementAndGet();
258       synchronized (taskReadyLock) {
259         while (seq_start == taskReadySeq) {
260           taskReadyLock.wait(checkInterval);
261           if (this.server != null) {
262             // check to see if we have stale recovering regions in our internal memory state
263             Map<String, HRegion> recoveringRegions = this.server.getRecoveringRegions();
264             if (!recoveringRegions.isEmpty()) {
265               // Make a local copy to prevent ConcurrentModificationException when other threads
266               // modify recoveringRegions
267               List<String> tmpCopy = new ArrayList<String>(recoveringRegions.keySet());
268               for (String region : tmpCopy) {
269                 String nodePath = ZKUtil.joinZNode(this.watcher.recoveringRegionsZNode, region);
270                 try {
271                   if (ZKUtil.checkExists(this.watcher, nodePath) == -1) {
272                     HRegion r = recoveringRegions.remove(region);
273                     if (r != null) {
274                       r.setRecovering(false);
275                     }
276                     LOG.debug("Mark recovering region:" + region + " up.");
277                   } else {
278                     // This check is a defensive (or redundant) mechanism to keep our internal
279                     // RS memory state from holding stale recovering regions when zookeeper
280                     // (the source of truth) says otherwise. We stop at the first znode that
281                     // still exists because in the normal case there should not be any, so
282                     // checking the first one is good enough.
283                     break;
284                   }
285                 } catch (KeeperException e) {
286                   // ignore zookeeper error
287                   LOG.debug("Got a zookeeper exception when trying to open a recovering region", e);
288                   break;
289                 }
290               }
291             }
292           }
293         }
294       }
295     }
296   }
297 
298   /**
299    * try to grab a 'lock' on the task zk node to own and execute the task.
300    * <p>
301    * @param path zk node for the task
302    */
303   private void grabTask(String path) {
304     Stat stat = new Stat();
305     long t = -1;
306     byte[] data;
307     synchronized (grabTaskLock) {
308       currentTask = path;
309       workerInGrabTask = true;
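          // stopTask() interrupts the worker thread; if an interrupt is already pending, bail
          // out before taking ownership of a task we would immediately abandon.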
310       if (Thread.interrupted()) {
311         return;
312       }
313     }
314     try {
315       try {
316         if ((data = ZKUtil.getDataNoWatch(this.watcher, path, stat)) == null) {
317           SplitLogCounters.tot_wkr_failed_to_grab_task_no_data.incrementAndGet();
318           return;
319         }
320       } catch (KeeperException e) {
321         LOG.warn("Failed to get data for znode " + path, e);
322         SplitLogCounters.tot_wkr_failed_to_grab_task_exception.incrementAndGet();
323         return;
324       }
325       SplitLogTask slt;
326       try {
327         slt = SplitLogTask.parseFrom(data);
328       } catch (DeserializationException e) {
329         LOG.warn("Failed to parse data for znode " + path, e);
330         SplitLogCounters.tot_wkr_failed_to_grab_task_exception.incrementAndGet();
331         return;
332       }
333       if (!slt.isUnassigned()) {
334         SplitLogCounters.tot_wkr_failed_to_grab_task_owned.incrementAndGet();
335         return;
336       }
337 
338       currentVersion = attemptToOwnTask(true, watcher, serverName, path, stat.getVersion());
339       if (currentVersion < 0) {
340         SplitLogCounters.tot_wkr_failed_to_grab_task_lost_race.incrementAndGet();
341         return;
342       }
343 
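          // A rescan node is a dummy task put up by the SplitLogManager to make workers re-scan
          // the task list; there is nothing to split, so just mark it DONE.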
344       if (ZKSplitLog.isRescanNode(watcher, currentTask)) {
345         HLogSplitterHandler.endTask(watcher, new SplitLogTask.Done(this.serverName),
346           SplitLogCounters.tot_wkr_task_acquired_rescan, currentTask, currentVersion);
347         return;
348       }
349 
350       LOG.info("worker " + serverName + " acquired task " + path);
351       SplitLogCounters.tot_wkr_task_acquired.incrementAndGet();
352       getDataSetWatchAsync();
353 
354       submitTask(path, currentVersion, this.report_period);
355 
356       // after a successful submit, sleep a little bit to allow other RSs to grab the remaining tasks
357       try {
358         int sleepTime = RandomUtils.nextInt(500) + 500;
359         Thread.sleep(sleepTime);
360       } catch (InterruptedException e) {
361         LOG.warn("Interrupted while yielding for other region servers", e);
362         Thread.currentThread().interrupt();
363       }
364     } finally {
365       synchronized (grabTaskLock) {
366         workerInGrabTask = false;
367         // clear the interrupt from stopTask() otherwise the next task will
368         // suffer
369         Thread.interrupted();
370       }
371     }
372   }
373 
374 
375   /**
376    * Try to own the task by transitioning the zk node data from UNASSIGNED to OWNED.
377    * <p>
378    * This method is also used to periodically heartbeat the task progress by transitioning the node
379    * from OWNED to OWNED.
380    * <p>
381    * @param isFirstTime true on the first attempt to own the task, false when heartbeating
382    * @param zkw zookeeper watcher used to access the task znode
383    * @param server name of this region server
384    * @param task path of the task znode
385    * @param taskZKVersion expected version of the task znode
386    * @return the latest version of the znode when the task is owned by this region server, otherwise -1
387    */
388   protected static int attemptToOwnTask(boolean isFirstTime, ZooKeeperWatcher zkw,
389       ServerName server, String task, int taskZKVersion) {
390     int latestZKVersion = FAILED_TO_OWN_TASK;
391     try {
392       SplitLogTask slt = new SplitLogTask.Owned(server);
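          // setData() with an expected version is a compare-and-swap: it only succeeds if no one
          // (another worker or the manager) has modified the task znode since we last read it.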
393       Stat stat = zkw.getRecoverableZooKeeper().setData(task, slt.toByteArray(), taskZKVersion);
394       if (stat == null) {
395         LOG.warn("zk.setData() returned null for path " + task);
396         SplitLogCounters.tot_wkr_task_heartbeat_failed.incrementAndGet();
397         return FAILED_TO_OWN_TASK;
398       }
399       latestZKVersion = stat.getVersion();
400       SplitLogCounters.tot_wkr_task_heartbeat.incrementAndGet();
401       return latestZKVersion;
402     } catch (KeeperException e) {
403       if (!isFirstTime) {
404         if (e.code().equals(KeeperException.Code.NONODE)) {
405           LOG.warn("NONODE failed to assert ownership for " + task, e);
406         } else if (e.code().equals(KeeperException.Code.BADVERSION)) {
407           LOG.warn("BADVERSION failed to assert ownership for " + task, e);
408         } else {
409           LOG.warn("failed to assert ownership for " + task, e);
410         }
411       }
412     } catch (InterruptedException e1) {
413       LOG.warn("Interrupted while trying to assert ownership of " +
414           task + " " + StringUtils.stringifyException(e1));
415       Thread.currentThread().interrupt();
416     }
417     SplitLogCounters.tot_wkr_task_heartbeat_failed.incrementAndGet();
418     return FAILED_TO_OWN_TASK;
419   }
420 
421   /**
422    * Calculates how many more splitters this region server could create, based on the expected
423    * average number of tasks per RS and the hard upper bound (maxConcurrentTasks) set by configuration. <br>
424    * At any given time, an RS may spawn at most MIN(expected tasks per RS, hard upper bound) splitters.
425    * @param numTasks current total number of available tasks
426    */
427   private int calculateAvailableSplitters(int numTasks) {
428     // at least one RS (itself) is available
429     int availableRSs = 1;
430     try {
431       List<String> regionServers = ZKUtil.listChildrenNoWatch(watcher, watcher.rsZNode);
432       availableRSs = Math.max(availableRSs, (regionServers == null) ? 0 : regionServers.size());
433     } catch (KeeperException e) {
434       // do nothing
435       LOG.debug("Failed to list online region servers from ZooKeeper", e);
436     }
437 
438     int expectedTasksPerRS = (numTasks / availableRSs) + ((numTasks % availableRSs == 0) ? 0 : 1);
439     expectedTasksPerRS = Math.max(1, expectedTasksPerRS); // at least one
440     // calculate how many more splitters we could spawn
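        // e.g. with 10 tasks and 4 available RSs, expectedTasksPerRS = 3; with the default
        // maxConcurrentTasks of 2 this returns 2 minus the number of tasks already in progress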
441     return Math.min(expectedTasksPerRS, this.maxConcurrentTasks) - this.tasksInProgress.get();
442   }
443 
444   /**
445    * Submit a log split task to the executor service
446    * @param curTask path of the task znode being split
447    * @param curTaskZKVersion current version of the task znode
448    */
449   void submitTask(final String curTask, final int curTaskZKVersion, final int reportPeriod) {
450     final MutableInt zkVersion = new MutableInt(curTaskZKVersion);
451 
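        // Progress reporter handed to the split task: every reportPeriod ms it re-asserts
        // ownership of the task znode, and returns false (so the split can be cancelled)
        // if the heartbeat fails because ownership was lost.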
452     CancelableProgressable reporter = new CancelableProgressable() {
453       private long last_report_at = 0;
454 
455       @Override
456       public boolean progress() {
457         long t = EnvironmentEdgeManager.currentTimeMillis();
458         if ((t - last_report_at) > reportPeriod) {
459           last_report_at = t;
460           int latestZKVersion =
461               attemptToOwnTask(false, watcher, serverName, curTask, zkVersion.intValue());
462           if (latestZKVersion < 0) {
463             LOG.warn("Failed to heartbeat the task " + curTask);
464             return false;
465           }
466           zkVersion.setValue(latestZKVersion);
467         }
468         return true;
469       }
470     };
471     
472     HLogSplitterHandler hsh =
473         new HLogSplitterHandler(this.server, curTask, zkVersion, reporter, this.tasksInProgress,
474             this.splitTaskExecutor);
475     this.executorService.submit(hsh);
476   }
477 
478   void getDataSetWatchAsync() {
479     this.watcher.getRecoverableZooKeeper().getZooKeeper().
480       getData(currentTask, this.watcher,
481       new GetDataAsyncCallback(), null);
482     SplitLogCounters.tot_wkr_get_data_queued.incrementAndGet();
483   }
484 
485   void getDataSetWatchSuccess(String path, byte[] data) {
486     SplitLogTask slt;
487     try {
488       slt = SplitLogTask.parseFrom(data);
489     } catch (DeserializationException e) {
490       LOG.warn("Failed to parse", e);
491       return;
492     }
493     synchronized (grabTaskLock) {
494       if (workerInGrabTask) {
495         // currentTask can change but that's ok
496         String taskpath = currentTask;
497         if (taskpath != null && taskpath.equals(path)) {
498           // have to compare data. cannot compare the version because then there
499           // would be a race with attemptToOwnTask()
500           // cannot just check whether the node has been transitioned to
501           // UNASSIGNED because by the time this worker sets the data watch
502           // the node might have made two transitions - from owned by this
503           // worker to unassigned to owned by another worker
504           if (! slt.isOwned(this.serverName) &&
505               ! slt.isDone(this.serverName) &&
506               ! slt.isErr(this.serverName) &&
507               ! slt.isResigned(this.serverName)) {
508             LOG.info("task " + taskpath + " preempted from " +
509                 serverName + ", current task state and owner=" + slt.toString());
510             stopTask();
511           }
512         }
513       }
514     }
515   }
516 
517   void getDataSetWatchFailure(String path) {
518     synchronized (grabTaskLock) {
519       if (workerInGrabTask) {
520         // currentTask can change but that's ok
521         String taskpath = currentTask;
522         if (taskpath != null && taskpath.equals(path)) {
523           LOG.info("retrying data watch on " + path);
524           SplitLogCounters.tot_wkr_get_data_retry.incrementAndGet();
525           getDataSetWatchAsync();
526         } else {
527           // no point setting a watch on the task which this worker is not
528           // working upon anymore
529         }
530       }
531     }
532   }
533 
534   @Override
535   public void nodeDataChanged(String path) {
536     // there will be a self generated dataChanged event every time attemptToOwnTask()
537     // heartbeats the task znode by upping its version
538     synchronized (grabTaskLock) {
539       if (workerInGrabTask) {
540         // currentTask can change
541         String taskpath = currentTask;
542         if (taskpath != null && taskpath.equals(path)) {
543           getDataSetWatchAsync();
544         }
545       }
546     }
547   }
548 
549 
550   private List<String> getTaskList() throws InterruptedException {
551     List<String> childrenPaths = null;
552     long sleepTime = 1000;
553     // Keep looping until we get the list of children, or until the
554     // worker thread is asked to exit.
555     while (!exitWorker) {
556       try {
557         childrenPaths = ZKUtil.listChildrenAndWatchForNewChildren(this.watcher,
558             this.watcher.splitLogZNode);
559         if (childrenPaths != null) {
560           return childrenPaths;
561         }
562       } catch (KeeperException e) {
563         LOG.warn("Could not get children of znode "
564             + this.watcher.splitLogZNode, e);
565       }
566       LOG.debug("Retry listChildren of znode " + this.watcher.splitLogZNode
567           + " after sleep for " + sleepTime + "ms!");
568       Thread.sleep(sleepTime);
569     }
570     return childrenPaths;
571   }
572 
573   @Override
574   public void nodeChildrenChanged(String path) {
575     if (path.equals(watcher.splitLogZNode)) {
576       LOG.debug("tasks arrived or departed");
577       synchronized (taskReadyLock) {
578         taskReadySeq++;
579         taskReadyLock.notify();
580       }
581     }
582   }
583 
584   /**
585    * If the worker is currently doing a task, i.e. splitting a log file, then stop the task.
586    * It doesn't exit the worker thread.
587    */
588   void stopTask() {
589     LOG.info("Sending interrupt to stop the worker thread");
590     worker.interrupt(); // TODO interrupt often gets swallowed, do what else?
591   }
592 
593 
594   /**
595    * start the SplitLogWorker thread
596    */
597   public void start() {
598     worker = new Thread(null, this, "SplitLogWorker-" + serverName);
599     exitWorker = false;
600     worker.start();
601   }
602 
603   /**
604    * stop the SplitLogWorker thread
605    */
606   public void stop() {
607     exitWorker = true;
608     stopTask();
609   }
610 
611   /**
612    * Asynchronous handler for zk get-data-set-watch on node results.
613    */
614   class GetDataAsyncCallback implements AsyncCallback.DataCallback {
615     private final Log LOG = LogFactory.getLog(GetDataAsyncCallback.class);
616 
617     @Override
618     public void processResult(int rc, String path, Object ctx, byte[] data, Stat stat) {
619       SplitLogCounters.tot_wkr_get_data_result.incrementAndGet();
620       if (rc != 0) {
621         LOG.warn("getdata rc = " + KeeperException.Code.get(rc) + " " + path);
622         getDataSetWatchFailure(path);
623         return;
624       }
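          // strip the identifier metadata that RecoverableZooKeeper prepends to znode data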
625       data = watcher.getRecoverableZooKeeper().removeMetaData(data);
626       getDataSetWatchSuccess(path, data);
627     }
628   }
629 
630   /**
631    * Objects implementing this interface actually do the task that has been
632    * acquired by a {@link SplitLogWorker}. Since there isn't a water-tight
633    * guarantee that two workers will not be executing the same task, it
634    * is better to have workers prepare the task and then have the
635    * {@link SplitLogManager} commit the work in SplitLogManager.TaskFinisher.
636    */
637   public interface TaskExecutor {
638     enum Status {
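          // DONE: the task completed successfully. ERR: unrecoverable error. RESIGNED: the worker
          // could not continue and wants the master to retry the task. PREEMPTED: the task was
          // taken away from this worker before the split could finish.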
639       DONE(),
640       ERR(),
641       RESIGNED(),
642       PREEMPTED()
643     }
644     Status exec(String name, CancelableProgressable p);
645   }
646 }