/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.master;

import static org.apache.hadoop.hbase.master.SplitLogManager.ResubmitDirective.CHECK;
import static org.apache.hadoop.hbase.master.SplitLogManager.ResubmitDirective.FORCE;
import static org.apache.hadoop.hbase.master.SplitLogManager.TerminationStatus.DELETED;
import static org.apache.hadoop.hbase.master.SplitLogManager.TerminationStatus.FAILURE;
import static org.apache.hadoop.hbase.master.SplitLogManager.TerminationStatus.IN_PROGRESS;
import static org.apache.hadoop.hbase.master.SplitLogManager.TerminationStatus.SUCCESS;

import java.io.IOException;
import java.io.InterruptedIOException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.concurrent.locks.ReentrantLock;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.PathFilter;
import org.apache.hadoop.hbase.ChoreService;
import org.apache.hadoop.hbase.CoordinatedStateManager;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.ScheduledChore;
import org.apache.hadoop.hbase.Server;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.SplitLogCounters;
import org.apache.hadoop.hbase.Stoppable;
import org.apache.hadoop.hbase.classification.InterfaceAudience;
import org.apache.hadoop.hbase.coordination.BaseCoordinatedStateManager;
import org.apache.hadoop.hbase.coordination.SplitLogManagerCoordination;
import org.apache.hadoop.hbase.coordination.SplitLogManagerCoordination.SplitLogManagerDetails;
import org.apache.hadoop.hbase.monitoring.MonitoredTask;
import org.apache.hadoop.hbase.monitoring.TaskMonitor;
import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitLogTask.RecoveryMode;
import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
import org.apache.hadoop.hbase.util.FSUtils;
import org.apache.hadoop.hbase.util.Pair;
import org.apache.hadoop.hbase.wal.DefaultWALProvider;
import org.apache.hadoop.hbase.wal.WALFactory;

import com.google.common.annotations.VisibleForTesting;

/**
 * Distributes the task of log splitting to the available region servers.
 * Coordination happens via the coordination engine. For every log file that has to be split a
 * task is created. SplitLogWorkers race to grab a task.
 *
 * <p>SplitLogManager monitors the tasks that it creates using the
 * timeoutMonitor thread. If a task's progress is slow then
 * {@link SplitLogManagerCoordination#checkTasks} will take away the
 * task from the owner {@link org.apache.hadoop.hbase.regionserver.SplitLogWorker}
 * and the task will be up for grabs again. When the task is done then it is
 * deleted by SplitLogManager.
 *
 * <p>Clients call {@link #splitLogDistributed(Path)} to split a region server's
 * log files. The caller thread waits in this method until all the log files
 * have been split.
 *
 * <p>All the coordination calls made by this class are asynchronous. This is mainly
 * to help reduce response time seen by the callers.
 *
 * <p>There is a race in this design between the SplitLogManager and the
 * SplitLogWorker. SplitLogManager might re-queue a task that has in reality
 * already been completed by a SplitLogWorker. We rely on the idempotency of
 * the log splitting task for correctness.
 *
 * <p>It is also assumed that every log splitting task is unique and once
 * completed (either with success or with error) it will not be submitted
 * again. If a task is resubmitted then there is a risk that the old "delete task"
 * can delete the re-submission.
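 *
 * <p>A minimal usage sketch. It assumes a master context that already supplies the
 * {@code Server}, {@code Configuration}, {@code Stoppable}, {@code MasterServices} and
 * {@code ServerName} arguments; {@code walDir} below is illustrative only:
 *
 * <pre>{@code
 * SplitLogManager slm = new SplitLogManager(server, conf, stopper, masterServices, serverName);
 * Path walDir = ...; // a dead region server's WAL directory under the root log dir
 * long bytesSplit = slm.splitLogDistributed(walDir); // blocks until all its logs are split
 * slm.stop(); // on shutdown, cancels the timeout monitor
 * }</pre>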
 */
@InterfaceAudience.Private
public class SplitLogManager {
  private static final Log LOG = LogFactory.getLog(SplitLogManager.class);

  private Server server;

  private final Stoppable stopper;
  private final Configuration conf;
  private final ChoreService choreService;

  public static final int DEFAULT_UNASSIGNED_TIMEOUT = (3 * 60 * 1000); // 3 min

  private long unassignedTimeout;
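  // Timestamp (ms) of the most recent task creation; the TimeoutMonitor compares it against
  // unassignedTimeout before forcing a rescan of unassigned tasks.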
  private long lastTaskCreateTime = Long.MAX_VALUE;
  private long checkRecoveringTimeThreshold = 15000; // 15 seconds
  private final List<Pair<Set<ServerName>, Boolean>> failedRecoveringRegionDeletions = Collections
      .synchronizedList(new ArrayList<Pair<Set<ServerName>, Boolean>>());

  /**
   * In distributedLogReplay mode, we need to touch both the splitlog and recovering-regions
   * znodes in one operation, so this lock is used to guard such cases.
   */
  protected final ReentrantLock recoveringRegionLock = new ReentrantLock();

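  // In-memory state for every active split task, keyed by the task name (the path of the log
  // being split) as used in the coordination engine.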
  private final ConcurrentMap<String, Task> tasks = new ConcurrentHashMap<String, Task>();
  private TimeoutMonitor timeoutMonitor;

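  // Workers reported dead since the TimeoutMonitor last ran; the chore drains this set (under
  // deadWorkersLock) and force-resubmits any tasks those workers owned.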
  private volatile Set<ServerName> deadWorkers = null;
  private final Object deadWorkersLock = new Object();

  /**
   * It's OK to construct this object even when region servers are not online. It does look up
   * orphan tasks in the coordination engine but it doesn't block waiting for them to be done.
   * @param server the server instance
   * @param conf the HBase configuration
   * @param stopper the stoppable in case anything is wrong
   * @param master the master services
   * @param serverName the master server name
   * @throws IOException if initialization fails
   */
  public SplitLogManager(Server server, Configuration conf, Stoppable stopper,
      MasterServices master, ServerName serverName) throws IOException {
    this.server = server;
    this.conf = conf;
    this.stopper = stopper;
    this.choreService = new ChoreService(serverName.toString() + "_splitLogManager_");
    if (server.getCoordinatedStateManager() != null) {
      SplitLogManagerCoordination coordination =
          ((BaseCoordinatedStateManager) server.getCoordinatedStateManager())
              .getSplitLogManagerCoordination();
      Set<String> failedDeletions = Collections.synchronizedSet(new HashSet<String>());
      SplitLogManagerDetails details =
          new SplitLogManagerDetails(tasks, master, failedDeletions, serverName);
      coordination.setDetails(details);
      coordination.init();
      // Determine recovery mode
    }
    this.unassignedTimeout =
        conf.getInt("hbase.splitlog.manager.unassigned.timeout", DEFAULT_UNASSIGNED_TIMEOUT);
    this.timeoutMonitor =
        new TimeoutMonitor(conf.getInt("hbase.splitlog.manager.timeoutmonitor.period", 1000),
            stopper);
    choreService.scheduleChore(timeoutMonitor);
  }

  private FileStatus[] getFileList(List<Path> logDirs, PathFilter filter) throws IOException {
    return getFileList(conf, logDirs, filter);
  }

  /**
   * Get a list of paths that need to be split given a set of server-specific directories and
   * optionally a filter.
   *
   * See {@link DefaultWALProvider#getServerNameFromWALDirectoryName} for more info on directory
   * layout.
   *
   * Should be package-private, but is needed by
   * {@link org.apache.hadoop.hbase.wal.WALSplitter#split(Path, Path, Path, FileSystem,
   *     Configuration, WALFactory)} for tests.
   */
  @VisibleForTesting
  public static FileStatus[] getFileList(final Configuration conf, final List<Path> logDirs,
      final PathFilter filter)
      throws IOException {
    List<FileStatus> fileStatus = new ArrayList<FileStatus>();
    for (Path logDir : logDirs) {
      final FileSystem fs = logDir.getFileSystem(conf);
      if (!fs.exists(logDir)) {
        LOG.warn(logDir + " doesn't exist. Nothing to do!");
        continue;
      }
      FileStatus[] logfiles = FSUtils.listStatus(fs, logDir, filter);
      if (logfiles == null || logfiles.length == 0) {
        LOG.info(logDir + " is empty dir, no logs to split");
      } else {
        Collections.addAll(fileStatus, logfiles);
      }
    }
    FileStatus[] a = new FileStatus[fileStatus.size()];
    return fileStatus.toArray(a);
  }

  /**
   * @param logDir one region server WAL dir path in .logs
   * @throws IOException if there was an error while splitting any log file
   * @return cumulative size of the logfiles split
   */
  public long splitLogDistributed(final Path logDir) throws IOException {
    List<Path> logDirs = new ArrayList<Path>();
    logDirs.add(logDir);
    return splitLogDistributed(logDirs);
  }

  /**
   * The caller will block until all the log files of the given region server have been processed -
   * successfully split or an error is encountered - by an available worker region server. This
   * method must only be called after the region servers have been brought online.
   * @param logDirs List of log dirs to split
   * @throws IOException If there was an error while splitting any log file
   * @return cumulative size of the logfiles split
   */
  public long splitLogDistributed(final List<Path> logDirs) throws IOException {
    if (logDirs.isEmpty()) {
      return 0;
    }
    Set<ServerName> serverNames = new HashSet<ServerName>();
    for (Path logDir : logDirs) {
      try {
        ServerName serverName = DefaultWALProvider.getServerNameFromWALDirectoryName(logDir);
        if (serverName != null) {
          serverNames.add(serverName);
        }
      } catch (IllegalArgumentException e) {
        // ignore invalid format error.
        LOG.warn("Cannot parse server name from " + logDir);
      }
    }
    return splitLogDistributed(serverNames, logDirs, null);
  }

  /**
   * The caller will block until all the log files of the given region servers have been
   * processed - successfully split or an error is encountered - by an available worker region
   * server. This method must only be called after the region servers have been brought online.
   * @param serverNames servers whose WAL directories are being split
   * @param logDirs List of log dirs to split
   * @param filter the Path filter to select specific files for consideration
   * @throws IOException If there was an error while splitting any log file
   * @return cumulative size of the logfiles split
   */
  public long splitLogDistributed(final Set<ServerName> serverNames, final List<Path> logDirs,
      PathFilter filter) throws IOException {
    MonitoredTask status = TaskMonitor.get().createStatus("Doing distributed log split in " +
      logDirs + " for serverName=" + serverNames);
    FileStatus[] logfiles = getFileList(logDirs, filter);
    status.setStatus("Checking directory contents...");
    SplitLogCounters.tot_mgr_log_split_batch_start.incrementAndGet();
    LOG.info("Started splitting " + logfiles.length + " logs in " + logDirs +
      " for " + serverNames);
    long t = EnvironmentEdgeManager.currentTime();
    long totalSize = 0;
    TaskBatch batch = new TaskBatch();
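    // null when no filter is given: the batch may cover both meta and user region logs;
    // flipped to true below when the meta filter was used.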
    Boolean isMetaRecovery = (filter == null) ? null : false;
    for (FileStatus lf : logfiles) {
      // TODO If the log file is still being written to - which is most likely
      // the case for the last log file - then its length will show up here
      // as zero. The size of such a file can only be retrieved after
      // recover-lease is done. totalSize will be under in most cases and the
      // metrics that it drives will also be under-reported.
      totalSize += lf.getLen();
      String pathToLog = FSUtils.removeRootPath(lf.getPath(), conf);
      if (!enqueueSplitTask(pathToLog, batch)) {
        throw new IOException("duplicate log split scheduled for " + lf.getPath());
      }
    }
    waitForSplittingCompletion(batch, status);
    // remove recovering regions
    if (filter == MasterFileSystem.META_FILTER /* reference comparison */) {
      // we split meta regions and user regions separately therefore logfiles are either all for
      // meta or user regions but not for both (we could have mixed situations in tests)
      isMetaRecovery = true;
    }
    removeRecoveringRegions(serverNames, isMetaRecovery);

    if (batch.done != batch.installed) {
      batch.isDead = true;
      SplitLogCounters.tot_mgr_log_split_batch_err.incrementAndGet();
      LOG.warn("error while splitting logs in " + logDirs + " installed = " + batch.installed
          + " but only " + batch.done + " done");
      String msg = "error or interrupted while splitting logs in " + logDirs + " Task = " + batch;
      status.abort(msg);
      throw new IOException(msg);
    }
    for (Path logDir : logDirs) {
      status.setStatus("Cleaning up log directory...");
      final FileSystem fs = logDir.getFileSystem(conf);
      try {
        if (fs.exists(logDir) && !fs.delete(logDir, false)) {
          LOG.warn("Unable to delete log src dir. Ignoring. " + logDir);
        }
      } catch (IOException ioe) {
        FileStatus[] files = fs.listStatus(logDir);
        if (files != null && files.length > 0) {
          LOG.warn("Returning success without actually splitting and "
              + "deleting all the log files in path " + logDir + ": "
              + Arrays.toString(files), ioe);
        } else {
          LOG.warn("Unable to delete log src dir. Ignoring. " + logDir, ioe);
        }
      }
      SplitLogCounters.tot_mgr_log_split_batch_success.incrementAndGet();
    }
    String msg =
        "finished splitting (more than or equal to) " + totalSize + " bytes in " + batch.installed
            + " log files in " + logDirs + " in "
            + (EnvironmentEdgeManager.currentTime() - t) + "ms";
    status.markComplete(msg);
    LOG.info(msg);
    return totalSize;
  }

  /**
   * Add a task entry to coordination if it is not already there.
   * @param taskname the path of the log to be split
   * @param batch the batch this task belongs to
   * @return true if a new entry is created, false if it is already there.
   */
  boolean enqueueSplitTask(String taskname, TaskBatch batch) {
    lastTaskCreateTime = EnvironmentEdgeManager.currentTime();
    String task =
        ((BaseCoordinatedStateManager) server.getCoordinatedStateManager())
            .getSplitLogManagerCoordination().prepareTask(taskname);
    Task oldtask = createTaskIfAbsent(task, batch);
    if (oldtask == null) {
      // publish the task in the coordination engine
      ((BaseCoordinatedStateManager) server.getCoordinatedStateManager())
          .getSplitLogManagerCoordination().submitTask(task);
      return true;
    }
    return false;
  }

  private void waitForSplittingCompletion(TaskBatch batch, MonitoredTask status) {
    synchronized (batch) {
      while ((batch.done + batch.error) != batch.installed) {
        try {
          status.setStatus("Waiting for distributed tasks to finish. " + " scheduled="
              + batch.installed + " done=" + batch.done + " error=" + batch.error);
          int remaining = batch.installed - (batch.done + batch.error);
          int actual = activeTasks(batch);
          if (remaining != actual) {
            LOG.warn("Expected " + remaining + " active tasks, but actually there are " + actual);
          }
          int remainingTasks =
              ((BaseCoordinatedStateManager) server.getCoordinatedStateManager())
                  .getSplitLogManagerCoordination().remainingTasksInCoordination();
          if (remainingTasks >= 0 && actual > remainingTasks) {
            LOG.warn("Expected at least " + actual + " tasks remaining, but actually there are "
                + remainingTasks);
          }
          if (remainingTasks == 0 || actual == 0) {
            LOG.warn("No more tasks remaining, splitting "
                + "should have completed. Remaining tasks is " + remainingTasks
                + ", active tasks in map " + actual);
            if (remainingTasks == 0 && actual == 0) {
              return;
            }
          }
          batch.wait(100);
          if (stopper.isStopped()) {
            LOG.warn("Stopped while waiting for log splits to be completed");
            return;
          }
        } catch (InterruptedException e) {
          LOG.warn("Interrupted while waiting for log splits to be completed");
          Thread.currentThread().interrupt();
          return;
        }
      }
    }
  }

  @VisibleForTesting
  ConcurrentMap<String, Task> getTasks() {
    return tasks;
  }

  private int activeTasks(final TaskBatch batch) {
    int count = 0;
    for (Task t : tasks.values()) {
      if (t.batch == batch && t.status == TerminationStatus.IN_PROGRESS) {
        count++;
      }
    }
    return count;
  }

  /**
   * Removes recovering regions under /hbase/recovering-regions/[encoded region name] so that the
   * region server hosting the region can allow reads to the recovered region.
   * @param serverNames servers which are just recovered; null means check all recovering regions
   * @param isMetaRecovery whether current recovery is for the meta region on {@code serverNames}
   */
  private void removeRecoveringRegions(final Set<ServerName> serverNames, Boolean isMetaRecovery) {
    if (!isLogReplaying()) {
      // the function is only used in WALEdit direct replay mode
      return;
    }

    Set<String> recoveredServerNameSet = new HashSet<String>();
    if (serverNames != null) {
      for (ServerName tmpServerName : serverNames) {
        recoveredServerNameSet.add(tmpServerName.getServerName());
      }
    }

    this.recoveringRegionLock.lock();
    try {
      ((BaseCoordinatedStateManager) server.getCoordinatedStateManager())
          .getSplitLogManagerCoordination().removeRecoveringRegions(recoveredServerNameSet,
            isMetaRecovery);
    } catch (IOException e) {
      LOG.warn("removeRecoveringRegions got exception. Will retry", e);
      if (serverNames != null && !serverNames.isEmpty()) {
        this.failedRecoveringRegionDeletions.add(new Pair<Set<ServerName>, Boolean>(serverNames,
            isMetaRecovery));
      }
    } finally {
      this.recoveringRegionLock.unlock();
    }
  }

  /**
   * Removes stale recovering regions under /hbase/recovering-regions/[encoded region name]
   * during the master initialization phase.
   * @param failedServers A set of known failed servers
   * @throws IOException if removing the stale recovering regions fails
   */
  void removeStaleRecoveringRegions(final Set<ServerName> failedServers) throws IOException,
      InterruptedIOException {
    Set<String> knownFailedServers = new HashSet<String>();
    if (failedServers != null) {
      for (ServerName tmpServerName : failedServers) {
        knownFailedServers.add(tmpServerName.getServerName());
      }
    }

    this.recoveringRegionLock.lock();
    try {
      ((BaseCoordinatedStateManager) server.getCoordinatedStateManager())
          .getSplitLogManagerCoordination().removeStaleRecoveringRegions(knownFailedServers);
    } finally {
      this.recoveringRegionLock.unlock();
    }
  }

  /**
   * @param path the path of the log to be split (also the task name)
   * @param batch the batch this task belongs to
   * @return null on success, existing task on error
   */
  private Task createTaskIfAbsent(String path, TaskBatch batch) {
    Task oldtask;
    // batch.installed is only changed via this function and
    // a single thread touches batch.installed.
    Task newtask = new Task();
    newtask.batch = batch;
    oldtask = tasks.putIfAbsent(path, newtask);
    if (oldtask == null) {
      batch.installed++;
      return null;
    }
    // new task was not used.
    synchronized (oldtask) {
      if (oldtask.isOrphan()) {
        if (oldtask.status == SUCCESS) {
          // The task is already done. Do not install the batch for this
          // task because it might be too late for setDone() to update
          // batch.done. There is no need for the batch creator to wait for
          // this task to complete.
          return null;
        }
        if (oldtask.status == IN_PROGRESS) {
          oldtask.batch = batch;
          batch.installed++;
          LOG.debug("Previously orphan task " + path + " is now being waited upon");
          return null;
        }
        while (oldtask.status == FAILURE) {
          LOG.debug("wait for status of task " + path + " to change to DELETED");
          SplitLogCounters.tot_mgr_wait_for_zk_delete.incrementAndGet();
          try {
            oldtask.wait();
          } catch (InterruptedException e) {
            Thread.currentThread().interrupt();
            LOG.warn("Interrupted when waiting for znode delete callback");
            // fall through to return failure
            break;
          }
        }
        if (oldtask.status != DELETED) {
          LOG.warn("Failure because previously failed task"
              + " state still present. Waiting for znode delete callback" + " path=" + path);
          return oldtask;
        }
        // reinsert the newTask and it must succeed this time
        Task t = tasks.putIfAbsent(path, newtask);
        if (t == null) {
          batch.installed++;
          return null;
        }
        LOG.fatal("Logic error. Deleted task still present in tasks map");
        assert false : "Deleted task still present in tasks map";
        return t;
      }
      LOG.warn("Failure because two threads can't wait for the same task; path=" + path);
      return oldtask;
    }
  }

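  /**
   * Returns the in-memory task for {@code path}, creating a new "orphan" task (one with no
   * owning {@link TaskBatch}) if none exists yet, e.g. for a task node discovered in the
   * coordination engine that was created by a previous master instance.
   */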
  Task findOrCreateOrphanTask(String path) {
    Task orphanTask = new Task();
    Task task = tasks.putIfAbsent(path, orphanTask);
    if (task == null) {
      LOG.info("creating orphan task " + path);
      SplitLogCounters.tot_mgr_orphan_task_acquired.incrementAndGet();
      task = orphanTask;
    }
    return task;
  }

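  /**
   * Shuts down the chore service and cancels the timeout monitor.
   */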
  public void stop() {
    if (choreService != null) {
      choreService.shutdown();
    }
    if (timeoutMonitor != null) {
      timeoutMonitor.cancel(true);
    }
  }

  void handleDeadWorker(ServerName workerName) {
    // resubmit the tasks on the TimeoutMonitor thread. Makes it easier
    // to reason about concurrency. Makes it easier to retry.
    synchronized (deadWorkersLock) {
      if (deadWorkers == null) {
        deadWorkers = new HashSet<ServerName>(100);
      }
      deadWorkers.add(workerName);
    }
    LOG.info("dead splitlog worker " + workerName);
  }

  void handleDeadWorkers(Set<ServerName> serverNames) {
    synchronized (deadWorkersLock) {
      if (deadWorkers == null) {
        deadWorkers = new HashSet<ServerName>(100);
      }
      deadWorkers.addAll(serverNames);
    }
    LOG.info("dead splitlog workers " + serverNames);
  }

  /**
   * Sets the recovery mode, based either on outstanding split log tasks from a previous run or
   * on the current configuration setting.
   * @param isForInitialization whether this call is made during master initialization
   * @throws IOException throws if it's impossible to set recovery mode
   */
  public void setRecoveryMode(boolean isForInitialization) throws IOException {
    ((BaseCoordinatedStateManager) server.getCoordinatedStateManager())
        .getSplitLogManagerCoordination().setRecoveryMode(isForInitialization);
  }

  public void markRegionsRecovering(ServerName server, Set<HRegionInfo> userRegions)
      throws InterruptedIOException, IOException {
    if (userRegions == null || (!isLogReplaying())) {
      return;
    }
    this.recoveringRegionLock.lock();
    try {
      // mark that we're creating recovering regions
      ((BaseCoordinatedStateManager) this.server.getCoordinatedStateManager())
          .getSplitLogManagerCoordination().markRegionsRecovering(server, userRegions);
    } finally {
      this.recoveringRegionLock.unlock();
    }
  }

  /**
   * @return whether log is replaying
   */
  public boolean isLogReplaying() {
    CoordinatedStateManager m = server.getCoordinatedStateManager();
    if (m == null) return false;
    return ((BaseCoordinatedStateManager) m).getSplitLogManagerCoordination().isReplaying();
  }

  /**
   * @return whether log is splitting
   */
  public boolean isLogSplitting() {
    if (server.getCoordinatedStateManager() == null) return false;
    return ((BaseCoordinatedStateManager) server.getCoordinatedStateManager())
        .getSplitLogManagerCoordination().isSplitting();
  }

  /**
   * @return the current log recovery mode
   */
  public RecoveryMode getRecoveryMode() {
    return ((BaseCoordinatedStateManager) server.getCoordinatedStateManager())
        .getSplitLogManagerCoordination().getRecoveryMode();
  }

  /**
   * Keeps track of the batch of tasks submitted together by a caller in splitLogDistributed().
   * Client threads use this object to wait for all their tasks to be done.
   * <p>
   * All access is synchronized.
   */
  @InterfaceAudience.Private
  public static class TaskBatch {
    public int installed = 0;
    public int done = 0;
    public int error = 0;
    public volatile boolean isDead = false;

    @Override
    public String toString() {
      return ("installed = " + installed + " done = " + done + " error = " + error);
    }
  }

  /**
   * In-memory state of an active task.
   */
  @InterfaceAudience.Private
  public static class Task {
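    // last_update: time of the last heartbeat for this task, or -1 while unassigned;
    // last_version: last seen version of the task node in the coordination engine;
    // cur_worker_name: the worker currently owning the task, or null if unassigned.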
    public volatile long last_update;
    public volatile int last_version;
    public volatile ServerName cur_worker_name;
    public volatile TaskBatch batch;
    public volatile TerminationStatus status;
    public volatile AtomicInteger incarnation = new AtomicInteger(0);
    public final AtomicInteger unforcedResubmits = new AtomicInteger();
    public volatile boolean resubmitThresholdReached;

    @Override
    public String toString() {
      return ("last_update = " + last_update + " last_version = " + last_version
          + " cur_worker_name = " + cur_worker_name + " status = " + status + " incarnation = "
          + incarnation + " resubmits = " + unforcedResubmits.get() + " batch = " + batch);
    }

    public Task() {
      last_version = -1;
      status = IN_PROGRESS;
      setUnassigned();
    }

    public boolean isOrphan() {
      return (batch == null || batch.isDead);
    }

    public boolean isUnassigned() {
      return (cur_worker_name == null);
    }

    public void heartbeatNoDetails(long time) {
      last_update = time;
    }

    public void heartbeat(long time, int version, ServerName worker) {
      last_version = version;
      last_update = time;
      cur_worker_name = worker;
    }

    public void setUnassigned() {
      cur_worker_name = null;
      last_update = -1;
    }
  }

  /**
   * Periodically checks all active tasks and resubmits the ones that have timed out.
   */
  private class TimeoutMonitor extends ScheduledChore {
    private long lastLog = 0;

    public TimeoutMonitor(final int period, Stoppable stopper) {
      super("SplitLogManager Timeout Monitor", stopper, period);
    }

    @Override
    protected void chore() {
      int resubmitted = 0;
      int unassigned = 0;
      int tot = 0;
      boolean found_assigned_task = false;
      Set<ServerName> localDeadWorkers;

      synchronized (deadWorkersLock) {
        localDeadWorkers = deadWorkers;
        deadWorkers = null;
      }

      for (Map.Entry<String, Task> e : tasks.entrySet()) {
        String path = e.getKey();
        Task task = e.getValue();
        ServerName cur_worker = task.cur_worker_name;
        tot++;
        // don't easily resubmit a task which hasn't been picked up yet. It
        // might be a long while before a SplitLogWorker is free to pick up a
        // task. This is because a SplitLogWorker picks up a task one at a
        // time. If we want progress when there are no region servers then we
        // will have to run a SplitLogWorker thread in the Master.
        if (task.isUnassigned()) {
          unassigned++;
          continue;
        }
        found_assigned_task = true;
        if (localDeadWorkers != null && localDeadWorkers.contains(cur_worker)) {
          SplitLogCounters.tot_mgr_resubmit_dead_server_task.incrementAndGet();
          if (((BaseCoordinatedStateManager) server.getCoordinatedStateManager())
              .getSplitLogManagerCoordination().resubmitTask(path, task, FORCE)) {
            resubmitted++;
          } else {
            handleDeadWorker(cur_worker);
            LOG.warn("Failed to resubmit task " + path + " owned by dead " + cur_worker
                + ", will retry.");
          }
        } else if (((BaseCoordinatedStateManager) server.getCoordinatedStateManager())
            .getSplitLogManagerCoordination().resubmitTask(path, task, CHECK)) {
          resubmitted++;
        }
      }
      if (tot > 0) {
        long now = EnvironmentEdgeManager.currentTime();
        if (now > lastLog + 5000) {
          lastLog = now;
          LOG.info("total tasks = " + tot + " unassigned = " + unassigned + " tasks=" + tasks);
        }
      }
      if (resubmitted > 0) {
        LOG.info("resubmitted " + resubmitted + " out of " + tot + " tasks");
      }
      // If there are pending tasks and all of them have been unassigned for
      // some time then put up a RESCAN node to ping the workers.
      // ZKSplitlog.DEFAULT_UNASSIGNED_TIMEOUT is of the order of minutes
      // because a. it is very unlikely that every worker had a
      // transient error when trying to grab the task b. if there are no
      // workers then all tasks will stay unassigned indefinitely and the
      // manager will be indefinitely creating RESCAN nodes. TODO may be the
      // master should spawn both a manager and a worker thread to guarantee
      // that there is always one worker in the system
      if (tot > 0
          && !found_assigned_task
          && ((EnvironmentEdgeManager.currentTime() - lastTaskCreateTime) > unassignedTimeout)) {
        for (Map.Entry<String, Task> e : tasks.entrySet()) {
          String key = e.getKey();
          Task task = e.getValue();
          // we have to do task.isUnassigned() check again because tasks might
          // have been asynchronously assigned. There is no locking required
          // for these checks ... it is OK even if tryGetDataSetWatch() is
          // called unnecessarily for a taskpath
          if (task.isUnassigned() && (task.status != FAILURE)) {
            // We just touch the znode to make sure it's still there
            ((BaseCoordinatedStateManager) server.getCoordinatedStateManager())
                .getSplitLogManagerCoordination().checkTaskStillAvailable(key);
          }
        }
        ((BaseCoordinatedStateManager) server.getCoordinatedStateManager())
            .getSplitLogManagerCoordination().checkTasks();
        SplitLogCounters.tot_mgr_resubmit_unassigned.incrementAndGet();
        LOG.debug("resubmitting unassigned task(s) after timeout");
      }
      Set<String> failedDeletions =
          ((BaseCoordinatedStateManager) server.getCoordinatedStateManager())
              .getSplitLogManagerCoordination().getDetails().getFailedDeletions();
      // Retry previously failed deletes
      if (failedDeletions.size() > 0) {
        List<String> tmpPaths = new ArrayList<String>(failedDeletions);
        for (String tmpPath : tmpPaths) {
          // deleteNode is an async call
          ((BaseCoordinatedStateManager) server.getCoordinatedStateManager())
              .getSplitLogManagerCoordination().deleteTask(tmpPath);
        }
        failedDeletions.removeAll(tmpPaths);
      }

      // Garbage collect left-over recovering regions
      long timeInterval =
          EnvironmentEdgeManager.currentTime()
              - ((BaseCoordinatedStateManager) server.getCoordinatedStateManager())
                  .getSplitLogManagerCoordination().getLastRecoveryTime();
      if (!failedRecoveringRegionDeletions.isEmpty()
          || (tot == 0 && tasks.size() == 0 && (timeInterval > checkRecoveringTimeThreshold))) {
        // the function itself performs more checks before GC'ing anything
        if (!failedRecoveringRegionDeletions.isEmpty()) {
          List<Pair<Set<ServerName>, Boolean>> previouslyFailedDeletions =
              new ArrayList<Pair<Set<ServerName>, Boolean>>(failedRecoveringRegionDeletions);
          failedRecoveringRegionDeletions.removeAll(previouslyFailedDeletions);
          for (Pair<Set<ServerName>, Boolean> failedDeletion : previouslyFailedDeletions) {
            removeRecoveringRegions(failedDeletion.getFirst(), failedDeletion.getSecond());
          }
        } else {
          removeRecoveringRegions(null, null);
        }
      }
    }
  }

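  /**
   * Directive used when resubmitting a task: CHECK resubmits only if the task appears to have
   * timed out, while FORCE resubmits unconditionally, e.g. when its worker is known to be dead.
   */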
  public enum ResubmitDirective {
    CHECK, FORCE;
  }

  public enum TerminationStatus {
    IN_PROGRESS("in_progress"), SUCCESS("success"), FAILURE("failure"), DELETED("deleted");

    String statusMsg;

    TerminationStatus(String msg) {
      statusMsg = msg;
    }

    @Override
    public String toString() {
      return statusMsg;
    }
  }
}