/**
 *
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.master.handler;

import java.io.FileNotFoundException;
import java.io.IOException;
import java.util.ArrayList;
import java.util.LinkedList;
import java.util.List;
import java.util.NavigableMap;
import java.util.TreeMap;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.hbase.classification.InterfaceAudience;
import org.apache.hadoop.hbase.CoordinatedStateException;
import org.apache.hadoop.hbase.TableDescriptor;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.InvalidFamilyOperationException;
import org.apache.hadoop.hbase.Server;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.TableNotDisabledException;
import org.apache.hadoop.hbase.MetaTableAccessor;
import org.apache.hadoop.hbase.client.HTable;
import org.apache.hadoop.hbase.client.TableState;
import org.apache.hadoop.hbase.executor.EventHandler;
import org.apache.hadoop.hbase.executor.EventType;
import org.apache.hadoop.hbase.master.BulkReOpen;
import org.apache.hadoop.hbase.master.MasterServices;
import org.apache.hadoop.hbase.master.TableLockManager.TableLock;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.zookeeper.MetaTableLocator;

import com.google.common.collect.Lists;
import com.google.common.collect.Maps;

/**
 * Base class for performing operations against tables.
 * Checks on whether the operation can go forward are done in {@link #prepare()}
 * rather than later on in {@link #process()}.  The idea is to fail fast rather
 * than deep down in an async invocation of {@link #process()} (which currently
 * has no means of reporting back issues once started).
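 * <p>A minimal usage sketch ({@code MyTableHandler} is a hypothetical subclass
 * that implements {@link #handleTableOperation(List)}):
 * <pre>{@code
 * TableEventHandler handler =
 *     new MyTableHandler(EventType.C_M_MODIFY_TABLE, tableName, server, masterServices)
 *         .prepare();   // fails fast here if the table cannot be operated on
 * handler.process();    // or hand the prepared handler to an ExecutorService
 * }</pre>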
 */
@InterfaceAudience.Private
public abstract class TableEventHandler extends EventHandler {
  private static final Log LOG = LogFactory.getLog(TableEventHandler.class);
  protected final MasterServices masterServices;
  protected final TableName tableName;
  protected TableLock tableLock;
  private boolean isPrepareCalled = false;

  public TableEventHandler(EventType eventType, TableName tableName, Server server,
      MasterServices masterServices) {
    super(server, eventType);
    this.masterServices = masterServices;
    this.tableName = tableName;
  }

  public TableEventHandler prepare() throws IOException {
    // Acquire the table write lock, blocking.
    this.tableLock = masterServices.getTableLockManager()
        .writeLock(tableName, eventType.toString());
    this.tableLock.acquire();
    boolean success = false;
    try {
      try {
        this.masterServices.checkTableModifiable(tableName);
      } catch (TableNotDisabledException ex) {
        if (isOnlineSchemaChangeAllowed()
            && eventType.isOnlineSchemaChangeSupported()) {
          LOG.debug("Ignoring table not disabled exception " +
              "for supporting online schema changes.");
        } else {
          throw ex;
        }
      }
      prepareWithTableLock();
      success = true;
    } finally {
      if (!success) {
        releaseTableLock();
      }
    }
    this.isPrepareCalled = true;
    return this;
  }

  /**
   * Called from {@link #prepare()} while holding the table lock. Subclasses
   * can do extra initialization here without worrying about releasing the
   * table lock.
   */
  protected void prepareWithTableLock() throws IOException {
  }

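  /**
   * Whether online schema changes are enabled via the
   * {@code hbase.online.schema.update.enable} configuration property
   * (defaults to {@code false}).
   */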
  private boolean isOnlineSchemaChangeAllowed() {
    return this.server.getConfiguration().getBoolean(
      "hbase.online.schema.update.enable", false);
  }

  @Override
  public void process() {
    if (!isPrepareCalled) {
      // For proper table locking semantics, the implementor must call
      // TableEventHandler.prepare() before calling process().
      throw new RuntimeException("Implementation should have called prepare() first");
    }
    try {
      LOG.info("Handling table operation " + eventType + " on table " +
          tableName);

      List<HRegionInfo> hris;
      if (TableName.META_TABLE_NAME.equals(tableName)) {
        hris = new MetaTableLocator().getMetaRegions(server.getZooKeeper());
      } else {
        hris = MetaTableAccessor.getTableRegions(server.getShortCircuitConnection(), tableName);
      }
      handleTableOperation(hris);
      if (eventType.isOnlineSchemaChangeSupported() && this.masterServices.
          getAssignmentManager().getTableStateManager().isTableState(
          tableName, TableState.State.ENABLED)) {
        if (reOpenAllRegions(hris)) {
          LOG.info("Completed table operation " + eventType + " on table " +
              tableName);
        } else {
          LOG.warn("Error on reopening the regions");
        }
      }
      completed(null);
    } catch (IOException e) {
      LOG.error("Error manipulating table " + tableName, e);
      completed(e);
    } catch (CoordinatedStateException e) {
      LOG.error("Error manipulating table " + tableName, e);
      completed(e);
    } finally {
      releaseTableLock();
    }
  }

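  /**
   * Releases the table write lock acquired in {@link #prepare()}, if any.
   */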
  protected void releaseTableLock() {
    if (this.tableLock != null) {
      try {
        this.tableLock.release();
      } catch (IOException ex) {
        LOG.warn("Could not release the table lock", ex);
      }
    }
  }

  /**
   * Called after process() completes.
   * @param exception null if process() was successful, or the failure cause otherwise.
   */
  protected void completed(final Throwable exception) {
  }

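  /**
   * Reopens the given regions on their hosting region servers, bucketing them
   * by server and driving the reopen through {@link BulkReOpen}. Used to apply
   * online schema changes to an enabled table.
   * @param regions regions to reopen; offlined split parents are skipped
   * @return true when all regions have been reopened, false if interrupted
   * @throws IOException if the region locations cannot be fetched
   */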
  public boolean reOpenAllRegions(List<HRegionInfo> regions) throws IOException {
    boolean done = false;
    LOG.info("Bucketing regions by region server...");
    HTable table = new HTable(masterServices.getConfiguration(), tableName);
    TreeMap<ServerName, List<HRegionInfo>> serverToRegions = Maps.newTreeMap();
    NavigableMap<HRegionInfo, ServerName> hriHserverMapping;
    try {
      hriHserverMapping = table.getRegionLocations();
    } finally {
      table.close();
    }

    List<HRegionInfo> reRegions = new ArrayList<HRegionInfo>();
    for (HRegionInfo hri : regions) {
      ServerName rsLocation = hriHserverMapping.get(hri);

      // Skip the offlined split parent region.
      // See HBASE-4578 for more information.
      if (null == rsLocation) {
        LOG.info("Skip " + hri);
        continue;
      }
      if (!serverToRegions.containsKey(rsLocation)) {
        LinkedList<HRegionInfo> hriList = Lists.newLinkedList();
        serverToRegions.put(rsLocation, hriList);
      }
      reRegions.add(hri);
      serverToRegions.get(rsLocation).add(hri);
    }

    LOG.info("Reopening " + reRegions.size() + " regions on "
        + serverToRegions.size() + " region servers.");
    this.masterServices.getAssignmentManager().setRegionsToReopen(reRegions);
    BulkReOpen bulkReopen = new BulkReOpen(this.server, serverToRegions,
        this.masterServices.getAssignmentManager());
    while (true) {
      try {
        if (bulkReopen.bulkReOpen()) {
          done = true;
          break;
        } else {
          LOG.warn("Timeout before reopening all regions");
        }
      } catch (InterruptedException e) {
        LOG.warn("Reopen was interrupted");
        // Preserve the interrupt.
        Thread.currentThread().interrupt();
        break;
      }
    }
    return done;
  }

  /**
   * Gets a TableDescriptor from the masterServices. Can throw exceptions.
   *
   * @return Table descriptor for this table
   * @throws FileNotFoundException
   * @throws IOException
   */
  public TableDescriptor getTableDescriptor()
  throws FileNotFoundException, IOException {
    TableDescriptor htd =
      this.masterServices.getTableDescriptors().getDescriptor(tableName);
    if (htd == null) {
      throw new IOException("TableDescriptor missing for " + tableName);
    }
    return htd;
  }

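  /**
   * Checks that the given column family exists in the table descriptor.
   * @param htd the descriptor to check
   * @param cf the column family name
   * @return the column family name that was passed in
   * @throws InvalidFamilyOperationException if the family is not present
   */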
  byte [] hasColumnFamily(final HTableDescriptor htd, final byte [] cf)
  throws InvalidFamilyOperationException {
    if (!htd.hasFamily(cf)) {
      throw new InvalidFamilyOperationException("Column family '" +
        Bytes.toString(cf) + "' does not exist");
    }
    return cf;
  }

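  /**
   * Performs the actual table operation. Invoked from {@link #process()} while
   * the table lock is held.
   * @param regions the current regions of the table
   */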
  protected abstract void handleTableOperation(List<HRegionInfo> regions)
    throws IOException, CoordinatedStateException;
}