/**
 *
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.master.handler;

import java.io.InterruptedIOException;
import java.io.IOException;
import java.util.List;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.hbase.classification.InterfaceAudience;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.CoordinatedStateException;
import org.apache.hadoop.hbase.TableDescriptor;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.Server;
import org.apache.hadoop.hbase.backup.HFileArchiver;
import org.apache.hadoop.hbase.MetaTableAccessor;
import org.apache.hadoop.hbase.executor.EventType;
import org.apache.hadoop.hbase.regionserver.HRegion;
import org.apache.hadoop.hbase.master.AssignmentManager;
import org.apache.hadoop.hbase.master.HMaster;
import org.apache.hadoop.hbase.master.MasterCoprocessorHost;
import org.apache.hadoop.hbase.master.MasterFileSystem;
import org.apache.hadoop.hbase.master.MasterServices;
import org.apache.hadoop.hbase.master.RegionStates;
import org.apache.hadoop.hbase.master.RegionState.State;

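/**
 * Handles the master side of table deletion (C_M_DELETE_TABLE): waits for all
 * regions of the table to leave transition, removes the regions from
 * hbase:meta, archives the table files on HDFS, and cleans up the descriptor
 * cache, region states and table state.
 * <p>
 * A minimal usage sketch, assuming the prepare/process lifecycle of
 * {@code EventHandler} in this code base (the executor below is illustrative):
 * <pre>
 *   executorService.submit(
 *       new DeleteTableHandler(tableName, server, masterServices).prepare());
 * </pre>
 */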
@InterfaceAudience.Private
public class DeleteTableHandler extends TableEventHandler {
  private static final Log LOG = LogFactory.getLog(DeleteTableHandler.class);

  protected HTableDescriptor hTableDescriptor = null;

  public DeleteTableHandler(TableName tableName, Server server,
      final MasterServices masterServices) {
    super(EventType.C_M_DELETE_TABLE, tableName, server, masterServices);
  }

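  /**
   * Looks up and caches the {@link HTableDescriptor} of the table being
   * deleted; the lookup fails if the table does not exist.
   */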
  @Override
  protected void prepareWithTableLock() throws IOException {
    // The next call fails if no such table.
    hTableDescriptor = getTableDescriptor().getHTableDescriptor();
  }

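  /**
   * Waits for each region of the table to leave transition, up to
   * "hbase.master.wait.on.region" milliseconds (default 5 minutes) per region.
   * Regions stuck in FAILED_OPEN are forced offline instead of being waited on.
   * @throws IOException if a region is still in transition after the timeout
   */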
  protected void waitRegionInTransition(final List<HRegionInfo> regions)
      throws IOException, CoordinatedStateException {
    AssignmentManager am = this.masterServices.getAssignmentManager();
    RegionStates states = am.getRegionStates();
    long waitTime = server.getConfiguration().
      getLong("hbase.master.wait.on.region", 5 * 60 * 1000);
    for (HRegionInfo region : regions) {
      long done = System.currentTimeMillis() + waitTime;
      while (System.currentTimeMillis() < done) {
        // A region stuck in FAILED_OPEN stays in transition; offline it so the
        // delete does not wait out the full timeout.
        if (states.isRegionInState(region, State.FAILED_OPEN)) {
          am.regionOffline(region);
        }
        if (!states.isRegionInTransition(region)) break;
        try {
          Thread.sleep(waitingTimeForEvents);
        } catch (InterruptedException e) {
          LOG.warn("Interrupted while sleeping");
          throw (InterruptedIOException)new InterruptedIOException().initCause(e);
        }
        LOG.debug("Waiting for region to leave transition: "
          + am.getRegionStates().getRegionTransitionState(region));
      }
      if (states.isRegionInTransition(region)) {
        throw new IOException("Waited hbase.master.wait.on.region (" +
          waitTime + "ms) for region " + region.getRegionNameAsString() +
          " to leave transition");
      }
    }
  }

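  /**
   * Runs the delete: waits out regions in transition, then removes the table
   * from hbase:meta and HDFS, calling the master coprocessor hooks before and
   * after the operation.
   */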
  @Override
  protected void handleTableOperation(List<HRegionInfo> regions)
      throws IOException, CoordinatedStateException {
    MasterCoprocessorHost cpHost = ((HMaster) this.server).getMasterCoprocessorHost();
    if (cpHost != null) {
      cpHost.preDeleteTableHandler(this.tableName);
    }

    // 1. Wait for regions in transition to clear
    waitRegionInTransition(regions);

    // 2. Remove table from hbase:meta and HDFS
    removeTableData(regions);

    if (cpHost != null) {
      cpHost.postDeleteTableHandler(this.tableName);
    }
  }

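  /**
   * Cleans up in-memory state for the deleted table: drops the cached table
   * descriptor, clears the table's regions from {@link RegionStates}, and
   * marks the table as deleted in the table state manager.
   */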
  private void cleanupTableState() throws IOException {
    // 3. Update table descriptor cache
    LOG.debug("Removing '" + tableName + "' descriptor.");
    this.masterServices.getTableDescriptors().remove(tableName);

    AssignmentManager am = this.masterServices.getAssignmentManager();

    // 4. Clean up regions of the table in RegionStates.
    LOG.debug("Removing '" + tableName + "' from region states.");
    am.getRegionStates().tableDeleted(tableName);

    // 5. If there is an entry for this table in the table state manager, remove it.
    LOG.debug("Marking '" + tableName + "' as deleted.");
    am.getTableStateManager().setDeletedTable(tableName);
  }

  /**
   * Removes the table from hbase:meta and archives the HDFS files.
   */
  protected void removeTableData(final List<HRegionInfo> regions)
      throws IOException, CoordinatedStateException {
    try {
      // 1. Remove regions from META
      LOG.debug("Deleting regions from META");
      MetaTableAccessor.deleteRegions(this.server.getShortCircuitConnection(), regions);

      // -----------------------------------------------------------------------
      // NOTE: At this point we still have data on disk, but nothing in hbase:meta.
      //       If the rename below fails, hbck will report an inconsistency.
      // -----------------------------------------------------------------------

      // 2. Move the table to the temp directory (/hbase/.tmp)
      MasterFileSystem mfs = this.masterServices.getMasterFileSystem();
      Path tempTableDir = mfs.moveTableToTemp(tableName);

      // 3. Archive regions from FS (temp directory)
      FileSystem fs = mfs.getFileSystem();
      for (HRegionInfo hri : regions) {
        LOG.debug("Archiving region " + hri.getRegionNameAsString() + " from FS");
        HFileArchiver.archiveRegion(fs, mfs.getRootDir(),
            tempTableDir, HRegion.getRegionDir(tempTableDir, hri.getEncodedName()));
      }

      // 4. Delete table directory from FS (temp directory)
      if (!fs.delete(tempTableDir, true)) {
        LOG.error("Couldn't delete " + tempTableDir);
      }

      LOG.debug("Table '" + tableName + "' archived!");
    } finally {
      cleanupTableState();
    }
  }

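  /**
   * Releases the table lock and asks the table lock manager to clean up lock
   * resources for the deleted table; a failure there is logged but not fatal.
   */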
  @Override
  protected void releaseTableLock() {
    super.releaseTableLock();
    try {
      masterServices.getTableLockManager().tableDeleted(tableName);
    } catch (IOException ex) {
      LOG.warn("Received exception from TableLockManager.tableDeleted:", ex); // not critical
    }
  }

  @Override
  public String toString() {
    String name = "UnknownServerName";
    if (server != null && server.getServerName() != null) {
      name = server.getServerName().toString();
    }
    return getClass().getSimpleName() + "-" + name + "-" + getSeqid() + "-" + tableName;
  }
}