/**
 *
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.master.handler;

import java.io.IOException;
import java.io.InterruptedIOException;
import java.util.ArrayList;
import java.util.List;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.CoordinatedStateException;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.MetaTableAccessor;
import org.apache.hadoop.hbase.Server;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.backup.HFileArchiver;
import org.apache.hadoop.hbase.classification.InterfaceAudience;
import org.apache.hadoop.hbase.client.Delete;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.executor.EventType;
import org.apache.hadoop.hbase.master.AssignmentManager;
import org.apache.hadoop.hbase.master.HMaster;
import org.apache.hadoop.hbase.master.MasterCoprocessorHost;
import org.apache.hadoop.hbase.master.MasterFileSystem;
import org.apache.hadoop.hbase.master.MasterServices;
import org.apache.hadoop.hbase.master.RegionState.State;
import org.apache.hadoop.hbase.master.RegionStates;
import org.apache.hadoop.hbase.regionserver.HRegion;

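/**
 * Handles the master's C_M_DELETE_TABLE event: waits for the table's regions to
 * leave the regions-in-transition set, removes the table from hbase:meta,
 * archives its files on HDFS, and cleans up master-side state (descriptor cache,
 * RegionStates, table state and namespace quota).
 *
 * <p>The master typically submits this handler to its executor service, roughly
 * along these lines (sketch; the exact call site may differ between versions):
 * <pre>
 *   service.submit(new DeleteTableHandler(tableName, master, master).prepare());
 * </pre>
 */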
@InterfaceAudience.Private
public class DeleteTableHandler extends TableEventHandler {
  private static final Log LOG = LogFactory.getLog(DeleteTableHandler.class);

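  /** Descriptor of the table being deleted; captured in {@link #prepareWithTableLock()}. */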
  protected HTableDescriptor hTableDescriptor = null;

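  /**
   * @param tableName name of the table to delete
   * @param server the master server
   * @param masterServices services used to reach the assignment manager,
   *   master file system and table descriptor cache
   */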
  public DeleteTableHandler(TableName tableName, Server server,
      final MasterServices masterServices) {
    super(EventType.C_M_DELETE_TABLE, tableName, server, masterServices);
  }

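  /**
   * Captures the table descriptor while the table lock is held; fails with an
   * exception if the table does not exist.
   */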
  @Override
  protected void prepareWithTableLock() throws IOException {
    // The next call fails if no such table.
    hTableDescriptor = getTableDescriptor();
  }

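  /**
   * Waits for each of the given regions to leave the regions-in-transition set,
   * offlining any region stuck in FAILED_OPEN. Waits up to
   * "hbase.master.wait.on.region" milliseconds (default 5 minutes) per region and
   * throws an IOException if a region is still in transition after that.
   */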
  protected void waitRegionInTransition(final List<HRegionInfo> regions)
      throws IOException, CoordinatedStateException {
    AssignmentManager am = this.masterServices.getAssignmentManager();
    RegionStates states = am.getRegionStates();
    long waitTime = server.getConfiguration().
      getLong("hbase.master.wait.on.region", 5 * 60 * 1000);
    for (HRegionInfo region : regions) {
      long done = System.currentTimeMillis() + waitTime;
      while (System.currentTimeMillis() < done) {
        if (states.isRegionInState(region, State.FAILED_OPEN)) {
          am.regionOffline(region);
        }
        if (!states.isRegionInTransition(region)) break;
        try {
          Thread.sleep(waitingTimeForEvents);
        } catch (InterruptedException e) {
          LOG.warn("Interrupted while sleeping");
          throw (InterruptedIOException)new InterruptedIOException().initCause(e);
        }
        LOG.debug("Waiting on region to clear regions in transition; "
          + am.getRegionStates().getRegionTransitionState(region));
      }
      if (states.isRegionInTransition(region)) {
        throw new IOException("Waited hbase.master.wait.on.region (" +
          waitTime + "ms) for region " + region.getRegionNameAsString() +
          " to leave regions in transition");
      }
    }
  }

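  /**
   * Performs the delete: invokes the pre/post delete coprocessor hooks, waits for
   * regions in transition to clear, removes the table from hbase:meta and HDFS,
   * and cleans up the descriptor cache, RegionStates, table state, leftover
   * hbase:meta rows and the namespace quota entry.
   */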
  @Override
  protected void handleTableOperation(List<HRegionInfo> regions)
      throws IOException, CoordinatedStateException {
    MasterCoprocessorHost cpHost = ((HMaster) this.server).getMasterCoprocessorHost();
    if (cpHost != null) {
      cpHost.preDeleteTableHandler(this.tableName);
    }

    // 1. Wait for regions in transition to clear
    waitRegionInTransition(regions);

    try {
      // 2. Remove table from hbase:meta and HDFS
      removeTableData(regions);
    } finally {
      // 3. Update table descriptor cache
      LOG.debug("Removing '" + tableName + "' descriptor.");
      this.masterServices.getTableDescriptors().remove(tableName);

      AssignmentManager am = this.masterServices.getAssignmentManager();

      // 4. Clean up regions of the table in RegionStates.
      LOG.debug("Removing '" + tableName + "' from region states.");
      am.getRegionStates().tableDeleted(tableName);

      // 5. If there is an entry for this table in zk and in the AssignmentManager, remove it.
      LOG.debug("Marking '" + tableName + "' as deleted.");
      am.getTableStateManager().setDeletedTable(tableName);

      // 6. Clean any remaining rows for this table.
      cleanAnyRemainingRows();
    }

    if (cpHost != null) {
      cpHost.postDeleteTableHandler(this.tableName);
    }
    ((HMaster) this.server).getMasterQuotaManager().removeTableFromNamespaceQuota(tableName);
  }

  /**
   * There may be items for this table still up in hbase:meta in the case where the
   * info:regioninfo column was empty because of some write error. Remove ALL rows from hbase:meta
   * that have to do with this table. See HBASE-12980.
   * @throws IOException if scanning or deleting rows in hbase:meta fails
   */
  private void cleanAnyRemainingRows() throws IOException {
    Scan tableScan = MetaTableAccessor.getScanForTableName(tableName);
    try (Table metaTable =
        this.masterServices.getConnection().getTable(TableName.META_TABLE_NAME)) {
      List<Delete> deletes = new ArrayList<Delete>();
      try (ResultScanner resScanner = metaTable.getScanner(tableScan)) {
        for (Result result : resScanner) {
          deletes.add(new Delete(result.getRow()));
        }
      }
      if (!deletes.isEmpty()) {
        LOG.warn("Deleting " + deletes.size() + " vestigial rows of " + this.tableName +
          " from " + TableName.META_TABLE_NAME);
        if (LOG.isDebugEnabled()) {
          for (Delete d: deletes) LOG.debug("Purging " + d);
        }
        metaTable.delete(deletes);
      }
    }
  }

  /**
   * Removes the table from hbase:meta and archives the HDFS files.
   */
  protected void removeTableData(final List<HRegionInfo> regions)
      throws IOException, CoordinatedStateException {
    // 1. Remove regions from META
    LOG.debug("Deleting regions from META");
    MetaTableAccessor.deleteRegions(this.server.getConnection(), regions);

    // -----------------------------------------------------------------------
    // NOTE: At this point we still have data on disk, but nothing in hbase:meta;
    //       if the rename below fails, hbck will report an inconsistency.
    // -----------------------------------------------------------------------

    // 2. Move the table to /hbase/.tmp
    MasterFileSystem mfs = this.masterServices.getMasterFileSystem();
    Path tempTableDir = mfs.moveTableToTemp(tableName);

    // 3. Archive regions from FS (temp directory)
    FileSystem fs = mfs.getFileSystem();
    for (HRegionInfo hri : regions) {
      LOG.debug("Archiving region " + hri.getRegionNameAsString() + " from FS");
      HFileArchiver.archiveRegion(fs, mfs.getRootDir(),
          tempTableDir, HRegion.getRegionDir(tempTableDir, hri.getEncodedName()));
    }

    // 4. Delete table directory from FS (temp directory)
    if (!fs.delete(tempTableDir, true)) {
      LOG.error("Couldn't delete " + tempTableDir);
    }

    LOG.debug("Table '" + tableName + "' archived!");
  }

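  /**
   * Releases the table lock and notifies the TableLockManager that the table has
   * been deleted so it can clean up; a failure here is logged but not fatal.
   */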
  @Override
  protected void releaseTableLock() {
    super.releaseTableLock();
    try {
      masterServices.getTableLockManager().tableDeleted(tableName);
    } catch (IOException ex) {
      LOG.warn("Received exception from TableLockManager.tableDeleted:", ex); //not critical
    }
  }

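  /**
   * Identifies this handler by class name, server name, event sequence id and table name.
   */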
  @Override
  public String toString() {
    String name = "UnknownServerName";
    if (server != null && server.getServerName() != null) {
      name = server.getServerName().toString();
    }
    return getClass().getSimpleName() + "-" + name + "-" + getSeqid() + "-" + tableName;
  }
}