/**
 *
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.master.handler;

import java.io.InterruptedIOException;
import java.io.IOException;
import java.util.List;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.hbase.classification.InterfaceAudience;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.CoordinatedStateException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.Server;
import org.apache.hadoop.hbase.backup.HFileArchiver;
import org.apache.hadoop.hbase.MetaTableAccessor;
import org.apache.hadoop.hbase.executor.EventType;
import org.apache.hadoop.hbase.regionserver.HRegion;
import org.apache.hadoop.hbase.master.AssignmentManager;
import org.apache.hadoop.hbase.master.HMaster;
import org.apache.hadoop.hbase.master.MasterCoprocessorHost;
import org.apache.hadoop.hbase.master.MasterFileSystem;
import org.apache.hadoop.hbase.master.MasterServices;
import org.apache.hadoop.hbase.master.RegionStates;
import org.apache.hadoop.hbase.master.RegionState.State;

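/**
 * Handler for the C_M_DELETE_TABLE master event. Waits for the table's regions
 * to leave the regions-in-transition state, removes the table's regions from
 * hbase:meta, archives its data on the filesystem, and finally cleans up the
 * cached descriptor and in-memory region/table state on the master.
 */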
@InterfaceAudience.Private
public class DeleteTableHandler extends TableEventHandler {
  private static final Log LOG = LogFactory.getLog(DeleteTableHandler.class);

  protected HTableDescriptor hTableDescriptor = null;

  public DeleteTableHandler(TableName tableName, Server server,
      final MasterServices masterServices) {
    super(EventType.C_M_DELETE_TABLE, tableName, server, masterServices);
  }

  @Override
  protected void prepareWithTableLock() throws IOException {
    // The next call fails if no such table.
    hTableDescriptor = getTableDescriptor().getHTableDescriptor();
  }

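  /**
   * Waits for each of the given regions to leave the regions-in-transition state,
   * up to "hbase.master.wait.on.region" milliseconds (default 5 minutes) per region.
   * Regions stuck in FAILED_OPEN are marked offline so the delete can proceed.
   */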
  protected void waitRegionInTransition(final List<HRegionInfo> regions)
      throws IOException, CoordinatedStateException {
    AssignmentManager am = this.masterServices.getAssignmentManager();
    RegionStates states = am.getRegionStates();
    long waitTime = server.getConfiguration().
      getLong("hbase.master.wait.on.region", 5 * 60 * 1000);
    for (HRegionInfo region : regions) {
      long done = System.currentTimeMillis() + waitTime;
      while (System.currentTimeMillis() < done) {
        if (states.isRegionInState(region, State.FAILED_OPEN)) {
          am.regionOffline(region);
        }
        if (!states.isRegionInTransition(region)) break;
        try {
          Thread.sleep(waitingTimeForEvents);
        } catch (InterruptedException e) {
          LOG.warn("Interrupted while sleeping");
          throw (InterruptedIOException) new InterruptedIOException().initCause(e);
        }
        LOG.debug("Waiting on region to clear regions in transition; "
          + am.getRegionStates().getRegionTransitionState(region));
      }
      if (states.isRegionInTransition(region)) {
        throw new IOException("Waited hbase.master.wait.on.region (" +
          waitTime + "ms) for region " + region.getRegionNameAsString() +
          " to leave regions in transition");
      }
    }
  }

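  /**
   * Deletes the table: runs the coprocessor pre/post hooks, waits for regions in
   * transition, then removes the table's data from hbase:meta and the filesystem.
   */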
  @Override
  protected void handleTableOperation(List<HRegionInfo> regions)
      throws IOException, CoordinatedStateException {
    MasterCoprocessorHost cpHost = ((HMaster) this.server).getMasterCoprocessorHost();
    if (cpHost != null) {
      cpHost.preDeleteTableHandler(this.tableName);
    }

    // 1. Wait for regions in transition to clear
    waitRegionInTransition(regions);

    // 2. Remove table from hbase:meta and HDFS
    removeTableData(regions);

    if (cpHost != null) {
      cpHost.postDeleteTableHandler(this.tableName);
    }
  }

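  /**
   * Clears the master's in-memory and cached state for the deleted table: the
   * table descriptor cache, the table's regions in RegionStates, and the entry
   * in the table state manager.
   */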
  private void cleanupTableState() throws IOException {
    // 3. Update table descriptor cache
    LOG.debug("Removing '" + tableName + "' descriptor.");
    this.masterServices.getTableDescriptors().remove(tableName);

    AssignmentManager am = this.masterServices.getAssignmentManager();

    // 4. Clean up regions of the table in RegionStates.
    LOG.debug("Removing '" + tableName + "' from region states.");
    am.getRegionStates().tableDeleted(tableName);

    // 5. Mark the table as deleted in the table state manager.
    LOG.debug("Marking '" + tableName + "' as deleted.");
    am.getTableStateManager().setDeletedTable(tableName);
  }

  /**
   * Removes the table from hbase:meta and archives the HDFS files.
   */
  protected void removeTableData(final List<HRegionInfo> regions)
      throws IOException, CoordinatedStateException {
    try {
      // 1. Remove regions from META
      LOG.debug("Deleting regions from META");
      MetaTableAccessor.deleteRegions(this.server.getConnection(), regions);

      // -----------------------------------------------------------------------
      // NOTE: At this point we still have data on disk, but nothing in hbase:meta;
      //       if the rename below fails, hbck will report an inconsistency.
      // -----------------------------------------------------------------------

      // 2. Move the table to /hbase/.tmp
      MasterFileSystem mfs = this.masterServices.getMasterFileSystem();
      Path tempTableDir = mfs.moveTableToTemp(tableName);

      // 3. Archive regions from FS (temp directory)
      FileSystem fs = mfs.getFileSystem();
      for (HRegionInfo hri : regions) {
        LOG.debug("Archiving region " + hri.getRegionNameAsString() + " from FS");
        HFileArchiver.archiveRegion(fs, mfs.getRootDir(),
            tempTableDir, HRegion.getRegionDir(tempTableDir, hri.getEncodedName()));
      }

      // 4. Delete table directory from FS (temp directory)
      if (!fs.delete(tempTableDir, true)) {
        LOG.error("Couldn't delete " + tempTableDir);
      }

      LOG.debug("Table '" + tableName + "' archived!");
    } finally {
      cleanupTableState();
    }
  }

  @Override
  protected void releaseTableLock() {
    super.releaseTableLock();
    try {
      masterServices.getTableLockManager().tableDeleted(tableName);
    } catch (IOException ex) {
      LOG.warn("Received exception from TableLockManager.tableDeleted:", ex); // not critical
    }
  }

  @Override
  public String toString() {
    String name = "UnknownServerName";
    if (server != null && server.getServerName() != null) {
      name = server.getServerName().toString();
    }
    return getClass().getSimpleName() + "-" + name + "-" + getSeqid() + "-" + tableName;
  }
}