/**
 *
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.master.handler;

import java.io.IOException;
import java.util.List;
import java.util.concurrent.ExecutorService;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.hbase.classification.InterfaceAudience;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.Server;
import org.apache.hadoop.hbase.TableNotEnabledException;
import org.apache.hadoop.hbase.TableNotFoundException;
import org.apache.hadoop.hbase.MetaTableAccessor;
import org.apache.hadoop.hbase.client.TableState;
import org.apache.hadoop.hbase.constraint.ConstraintException;
import org.apache.hadoop.hbase.executor.EventHandler;
import org.apache.hadoop.hbase.executor.EventType;
import org.apache.hadoop.hbase.master.AssignmentManager;
import org.apache.hadoop.hbase.master.BulkAssigner;
import org.apache.hadoop.hbase.master.HMaster;
import org.apache.hadoop.hbase.master.MasterCoprocessorHost;
import org.apache.hadoop.hbase.master.RegionState;
import org.apache.hadoop.hbase.master.RegionStates;
import org.apache.hadoop.hbase.master.TableLockManager;
import org.apache.hadoop.hbase.master.TableLockManager.TableLock;
import org.apache.htrace.Trace;

/**
 * Handler to run disable of a table.
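 * <p>
 * A rough sketch of how a caller on the master might drive this handler
 * (illustrative only; the actual wiring inside HMaster may differ):
 * <pre>
 * DisableTableHandler handler = new DisableTableHandler(master, tableName,
 *     assignmentManager, tableLockManager, false);
 * handler.prepare();   // checks the table and takes the table write lock
 * handler.process();   // unassigns regions and marks the table DISABLED
 * </pre>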
 */
@InterfaceAudience.Private
public class DisableTableHandler extends EventHandler {
  private static final Log LOG = LogFactory.getLog(DisableTableHandler.class);
  private final TableName tableName;
  private final AssignmentManager assignmentManager;
  private final TableLockManager tableLockManager;
  private final boolean skipTableStateCheck;
  private TableLock tableLock;

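  /**
   * @param server the master server this handler runs on
   * @param tableName table to disable
   * @param assignmentManager drives region unassignment and table state changes
   * @param tableLockManager source of the table write lock taken in {@link #prepare()}
   * @param skipTableStateCheck if true, skip the ENABLED-state precondition check
   */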
  public DisableTableHandler(Server server, TableName tableName,
      AssignmentManager assignmentManager, TableLockManager tableLockManager,
      boolean skipTableStateCheck) {
    super(server, EventType.C_M_DISABLE_TABLE);
    this.tableName = tableName;
    this.assignmentManager = assignmentManager;
    this.tableLockManager = tableLockManager;
    this.skipTableStateCheck = skipTableStateCheck;
  }

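  /**
   * Validates the request and takes the table write lock.
   * <p>
   * Rejects the catalog table, verifies the table exists in meta, and (unless
   * {@code skipTableStateCheck} is set) atomically moves the table state from
   * ENABLED to DISABLING. The lock is released again if any of these checks fail.
   *
   * @return this handler, so the call can be chained into {@link #process()}
   */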
  public DisableTableHandler prepare()
      throws TableNotFoundException, TableNotEnabledException, IOException {
    if (tableName.equals(TableName.META_TABLE_NAME)) {
      throw new ConstraintException("Cannot disable catalog table");
    }
    // Acquire the table write lock, blocking.
    this.tableLock = this.tableLockManager.writeLock(tableName,
        EventType.C_M_DISABLE_TABLE.toString());
    this.tableLock.acquire();

    boolean success = false;
    try {
      // Check if the table exists.
      if (!MetaTableAccessor.tableExists(this.server.getConnection(), tableName)) {
        throw new TableNotFoundException(tableName);
      }

      // There could be multiple client requests trying to disable or enable
      // the table at the same time. Ensure only the first request is honored.
      // After that, no other requests can be accepted until the table reaches
      // DISABLED or ENABLED.
      // TODO: reevaluate this since we have table locks now
      if (!skipTableStateCheck) {
        TableState.State state = this.assignmentManager
            .getTableStateManager().setTableStateIfInStates(
                this.tableName, TableState.State.DISABLING,
                TableState.State.ENABLED);
        if (state != null) {
          LOG.info("Table " + tableName + " isn't enabled; is " + state.name()
              + "; skipping disable");
          throw new TableNotEnabledException(this.tableName + " state is " + state.name());
        }
      }
      success = true;
    } finally {
      if (!success) {
        releaseTableLock();
      }
    }

    return this;
  }

  @Override
  public String toString() {
    String name = "UnknownServerName";
    if (server != null && server.getServerName() != null) {
      name = server.getServerName().toString();
    }
    return getClass().getSimpleName() + "-" + name + "-" + getSeqid() + "-" +
        tableName;
  }

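  /**
   * Disables the table: invokes the master coprocessor pre/post disable-table
   * hooks around {@link #handleDisableTable()} and always releases the table
   * lock taken in {@link #prepare()}, even if the disable fails.
   */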
  @Override
  public void process() {
    try {
      LOG.info("Attempting to disable table " + this.tableName);
      MasterCoprocessorHost cpHost = ((HMaster) this.server)
          .getMasterCoprocessorHost();
      if (cpHost != null) {
        cpHost.preDisableTableHandler(this.tableName);
      }
      handleDisableTable();
      if (cpHost != null) {
        cpHost.postDisableTableHandler(this.tableName);
      }
    } catch (IOException e) {
      LOG.error("Error trying to disable table " + this.tableName, e);
    } finally {
      releaseTableLock();
    }
  }

  private void releaseTableLock() {
    if (this.tableLock != null) {
      try {
        this.tableLock.release();
      } catch (IOException ex) {
        LOG.warn("Could not release the table lock", ex);
      }
    }
  }

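  /**
   * Marks the table DISABLING, then repeatedly bulk-unassigns whatever regions
   * of the table are still online until none remain (or the wait is
   * interrupted), and finally marks the table DISABLED on success.
   */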
  private void handleDisableTable() throws IOException {
    // Mark the table as DISABLING in the table state manager.
    this.assignmentManager.getTableStateManager().setTableState(this.tableName,
      TableState.State.DISABLING);
    boolean done = false;
    while (true) {
      // Get the list of online regions that are of this table. Regions that are
      // already closed will not be included in this list; i.e. the returned
      // list is not ALL regions in a table, it is all online regions according
      // to the in-memory state on this master.
      final List<HRegionInfo> regions = this.assignmentManager
        .getRegionStates().getRegionsOfTable(tableName);
      if (regions.size() == 0) {
        done = true;
        break;
      }
      LOG.info("Offlining " + regions.size() + " regions.");
      BulkDisabler bd = new BulkDisabler(this.server, regions);
      try {
        if (bd.bulkAssign()) {
          done = true;
          break;
        }
      } catch (InterruptedException e) {
        LOG.warn("Disable was interrupted");
        // Preserve the interrupt.
        Thread.currentThread().interrupt();
        break;
      }
    }
    // Flip the table to disabled if successful.
    if (done) {
      this.assignmentManager.getTableStateManager().setTableState(this.tableName,
        TableState.State.DISABLED);
    }
    LOG.info("Disable of table " + this.tableName + " done=" + done);
  }

  /**
   * Run bulk disable: submit an unassign for each online region of the table,
   * then wait for the regions to go offline.
   */
  class BulkDisabler extends BulkAssigner {
    private final List<HRegionInfo> regions;

    BulkDisabler(final Server server, final List<HRegionInfo> regions) {
      super(server);
      this.regions = regions;
    }

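    /**
     * Submits one unassign task per region, skipping regions that are already
     * in transition unless they are stuck in FAILED_CLOSE.
     */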
    @Override
    protected void populatePool(ExecutorService pool) {
      RegionStates regionStates = assignmentManager.getRegionStates();
      for (HRegionInfo region: regions) {
        if (regionStates.isRegionInTransition(region)
            && !regionStates.isRegionInState(region, RegionState.State.FAILED_CLOSE)) {
          continue;
        }
        final HRegionInfo hri = region;
        pool.execute(Trace.wrap("DisableTableHandler.BulkDisabler", new Runnable() {
          public void run() {
            assignmentManager.unassign(hri);
          }
        }));
      }
    }

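    /**
     * Polls the master's in-memory region states until no regions of the table
     * remain online, the server is stopped, or the timeout expires.
     *
     * @return true only if every region of the table has gone offline
     */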
    @Override
    protected boolean waitUntilDone(long timeout)
        throws InterruptedException {
      long startTime = System.currentTimeMillis();
      long remaining = timeout;
      List<HRegionInfo> regions = null;
      long lastLogTime = startTime;
      while (!server.isStopped() && remaining > 0) {
        Thread.sleep(waitingTimeForEvents);
        regions = assignmentManager.getRegionStates().getRegionsOfTable(tableName);
        long now = System.currentTimeMillis();
        // Don't log more than once every ten seconds. It's obnoxious. And only log table regions
        // if we are waiting a while for them to go down...
        if (LOG.isDebugEnabled() && ((now - lastLogTime) > 10000)) {
          lastLogTime = now;
          LOG.debug("Disable waiting until done; " + remaining + " ms remaining; " + regions);
        }
        if (regions.isEmpty()) break;
        remaining = timeout - (now - startTime);
      }
      return regions != null && regions.isEmpty();
    }
  }
}