/**
 *
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.regionserver;

import java.io.IOException;
import java.io.InterruptedIOException;
import java.net.ConnectException;
import java.net.SocketTimeoutException;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.hbase.classification.InterfaceAudience;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.NotServingRegionException;
import org.apache.hadoop.hbase.Server;
import org.apache.hadoop.hbase.client.RetriesExhaustedException;
import org.apache.hadoop.hbase.coordination.BaseCoordinatedStateManager;
import org.apache.hadoop.hbase.coordination.SplitLogWorkerCoordination;
import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitLogTask.RecoveryMode;
import org.apache.hadoop.hbase.wal.WALFactory;
import org.apache.hadoop.hbase.wal.WALSplitter;
import org.apache.hadoop.hbase.util.CancelableProgressable;
import org.apache.hadoop.hbase.util.ExceptionUtil;
import org.apache.hadoop.hbase.util.FSUtils;

import com.google.common.annotations.VisibleForTesting;

/**
 * This worker is spawned in every regionserver, including the master. The worker waits for log
 * splitting tasks to be put up by the {@link org.apache.hadoop.hbase.master.SplitLogManager}
 * running in the master and races with workers on other servers to acquire those tasks.
 * The coordination is done via the coordination engine.
 * <p>
 * If a worker has successfully moved a task from state UNASSIGNED to OWNED then it owns the task.
 * It keeps heartbeating the manager by periodically re-asserting the OWNED state of the task. On
 * success it moves the task to TASK_DONE. On an unrecoverable error it moves the task state to
 * ERR. If it cannot continue but wants the master to retry the task then it moves the task state
 * to RESIGNED.
 * <p>
 * The manager can take a task away from a worker by moving the task from OWNED to UNASSIGNED. In
 * the absence of a global lock there is an unavoidable race here - a worker might have just
 * finished its task when it is stripped of its ownership. Here we rely on the idempotency of the
 * log splitting task for correctness.
 */
@InterfaceAudience.Private
public class SplitLogWorker implements Runnable {

  private static final Log LOG = LogFactory.getLog(SplitLogWorker.class);

  Thread worker;
  // coordination engine through which split-log tasks are acquired and tracked
  private SplitLogWorkerCoordination coordination;
  private Configuration conf;
  private RegionServerServices server;
  public SplitLogWorker(Server hserver, Configuration conf, RegionServerServices server,
      TaskExecutor splitTaskExecutor) {
    this.server = server;
    this.conf = conf;
    this.coordination =
        ((BaseCoordinatedStateManager) hserver.getCoordinatedStateManager())
            .getSplitLogWorkerCoordination();
    coordination.init(server, conf, splitTaskExecutor, this);
  }

  public SplitLogWorker(final Server hserver, final Configuration conf,
      final RegionServerServices server, final LastSequenceId sequenceIdChecker,
      final WALFactory factory) {
    this(server, conf, server, new TaskExecutor() {
      @Override
      public Status exec(String filename, RecoveryMode mode, CancelableProgressable p) {
        Path rootdir;
        FileSystem fs;
        try {
          rootdir = FSUtils.getRootDir(conf);
          fs = rootdir.getFileSystem(conf);
        } catch (IOException e) {
          LOG.warn("could not find root dir or fs", e);
          return Status.RESIGNED;
        }
        // TODO have to correctly figure out when log splitting has been
        // interrupted or has encountered a transient error and when it has
        // encountered a bad non-retry-able persistent error.
        try {
          if (!WALSplitter.splitLogFile(rootdir, fs.getFileStatus(new Path(rootdir, filename)),
            fs, conf, p, sequenceIdChecker, server.getCoordinatedStateManager(), mode, factory)) {
            return Status.PREEMPTED;
          }
        } catch (InterruptedIOException iioe) {
          LOG.warn("log splitting of " + filename + " interrupted, resigning", iioe);
          return Status.RESIGNED;
        } catch (IOException e) {
          Throwable cause = e.getCause();
          if (e instanceof RetriesExhaustedException && (cause instanceof NotServingRegionException
                  || cause instanceof ConnectException
                  || cause instanceof SocketTimeoutException)) {
            LOG.warn("log replaying of " + filename + " can't connect to the target regionserver, "
                + "resigning", e);
            return Status.RESIGNED;
          } else if (cause instanceof InterruptedException) {
            LOG.warn("log splitting of " + filename + " interrupted, resigning", e);
            return Status.RESIGNED;
          }
          LOG.warn("log splitting of " + filename + " failed, returning error", e);
          return Status.ERR;
        }
        return Status.DONE;
      }
    });
  }

  @Override
  public void run() {
    try {
      LOG.info("SplitLogWorker " + server.getServerName() + " starting");
      coordination.registerListener();
      // wait until the coordination engine is ready
      boolean res = false;
      while (!res && !coordination.isStop()) {
        res = coordination.isReady();
      }
      if (!coordination.isStop()) {
        coordination.taskLoop();
      }
    } catch (Throwable t) {
      if (ExceptionUtil.isInterrupt(t)) {
        LOG.info("SplitLogWorker interrupted. Exiting. " + (coordination.isStop() ? "" :
            " (ERROR: exitWorker is not set, exiting anyway)"));
      } else {
        // only a logic error can get us here. Printing it out
        // to make debugging easier
        LOG.error("unexpected error ", t);
      }
    } finally {
      coordination.removeListener();
      LOG.info("SplitLogWorker " + server.getServerName() + " exiting");
    }
  }

  /**
   * If the worker is doing a task, i.e. splitting a log file, then stop the task.
   * This does not exit the worker thread.
   */
  public void stopTask() {
    LOG.info("Sending interrupt to stop the worker thread");
    worker.interrupt(); // TODO interrupt often gets swallowed, do what else?
  }

  /**
   * Start the SplitLogWorker thread.
   */
  public void start() {
    worker = new Thread(null, this, "SplitLogWorker-" + server.getServerName().toShortString());
    worker.start();
  }

  /**
   * Stop the SplitLogWorker thread.
   */
  public void stop() {
    coordination.stopProcessingTasks();
    stopTask();
  }

  /**
   * Objects implementing this interface actually do the task that has been
   * acquired by a {@link SplitLogWorker}. Since there isn't a water-tight
   * guarantee that two workers will not be executing the same task, it is
   * better to have workers prepare the task and then have the
   * {@link org.apache.hadoop.hbase.master.SplitLogManager} commit the work in
   * SplitLogManager.TaskFinisher. An illustrative sketch of an implementation
   * follows the end of this class.
   */
  public interface TaskExecutor {
    enum Status {
      DONE(),
      ERR(),
      RESIGNED(),
      PREEMPTED()
    }
    Status exec(String name, RecoveryMode mode, CancelableProgressable p);
  }

  /**
   * Returns the number of tasks processed by coordination.
   * This method is used by tests only.
   */
  @VisibleForTesting
  public int getTaskReadySeq() {
    return coordination.getTaskReadySeq();
  }
}
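
/*
 * Illustrative only: a minimal sketch of a TaskExecutor implementation, showing how the
 * Status values are meant to be used. The class name, log messages, and the placeholder
 * split logic are hypothetical and are not part of HBase; the executor actually used by
 * the worker is the anonymous TaskExecutor built in the WALFactory constructor above,
 * which delegates to WALSplitter.splitLogFile().
 */
class ExampleSplitTaskExecutor implements SplitLogWorker.TaskExecutor {

  private static final Log EX_LOG = LogFactory.getLog(ExampleSplitTaskExecutor.class);

  @Override
  public Status exec(String filename, RecoveryMode mode, CancelableProgressable p) {
    try {
      // Report progress to the caller; a false return value means the SplitLogManager
      // has preempted the task, so stop working on it.
      if (!p.progress()) {
        return Status.PREEMPTED;
      }
      // ... do the actual (idempotent) split work for 'filename' here ...
      return Status.DONE;
    } catch (RuntimeException e) {
      // A problem the master may be able to retry elsewhere: resign rather than marking
      // the task as failed. An unrecoverable problem would map to Status.ERR instead.
      EX_LOG.warn("resigning split task for " + filename, e);
      return Status.RESIGNED;
    }
  }
}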