/**
 *
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.master.handler;

import java.io.IOException;
import java.io.InterruptedIOException;
import java.security.PrivilegedExceptionAction;
import java.util.ArrayList;
import java.util.List;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.hbase.classification.InterfaceAudience;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.CoordinatedStateException;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.MetaTableAccessor;
import org.apache.hadoop.hbase.NotAllMetaRegionsOnlineException;
import org.apache.hadoop.hbase.Server;
import org.apache.hadoop.hbase.TableDescriptor;
import org.apache.hadoop.hbase.TableExistsException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.RegionReplicaUtil;
import org.apache.hadoop.hbase.client.TableState;
import org.apache.hadoop.hbase.executor.EventHandler;
import org.apache.hadoop.hbase.executor.EventType;
import org.apache.hadoop.hbase.ipc.RequestContext;
import org.apache.hadoop.hbase.master.AssignmentManager;
import org.apache.hadoop.hbase.master.HMaster;
import org.apache.hadoop.hbase.master.MasterCoprocessorHost;
import org.apache.hadoop.hbase.master.MasterFileSystem;
import org.apache.hadoop.hbase.master.MasterServices;
import org.apache.hadoop.hbase.master.TableLockManager;
import org.apache.hadoop.hbase.master.TableLockManager.TableLock;
import org.apache.hadoop.hbase.security.User;
import org.apache.hadoop.hbase.security.UserProvider;
import org.apache.hadoop.hbase.util.FSTableDescriptors;
import org.apache.hadoop.hbase.util.FSUtils;
import org.apache.hadoop.hbase.util.ModifyRegionUtils;
import org.apache.hadoop.hbase.util.ServerRegionReplicaUtil;

/**
 * Handler to create a table.
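 * <p>
 * A minimal usage sketch (the {@code master} and {@code executor} variables below are
 * illustrative placeholders, not part of this class):
 * <pre>
 *   CreateTableHandler handler = new CreateTableHandler(master, fileSystemManager,
 *       hTableDescriptor, conf, newRegions, masterServices);
 *   handler.prepare();        // verifies hbase:meta is online and the table does not exist yet
 *   executor.submit(handler); // process() lays out the table on disk, updates META, assigns regions
 * </pre>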
 */
@InterfaceAudience.Private
public class CreateTableHandler extends EventHandler {
  private static final Log LOG = LogFactory.getLog(CreateTableHandler.class);
  protected final MasterFileSystem fileSystemManager;
  protected final HTableDescriptor hTableDescriptor;
  protected final Configuration conf;
  private final AssignmentManager assignmentManager;
  private final TableLockManager tableLockManager;
  private final HRegionInfo[] newRegions;
  private final MasterServices masterServices;
  private final TableLock tableLock;
  private User activeUser;

  public CreateTableHandler(Server server, MasterFileSystem fileSystemManager,
      HTableDescriptor hTableDescriptor, Configuration conf, HRegionInfo[] newRegions,
      MasterServices masterServices) {
    super(server, EventType.C_M_CREATE_TABLE);

    this.fileSystemManager = fileSystemManager;
    this.hTableDescriptor = hTableDescriptor;
    this.conf = conf;
    this.newRegions = newRegions;
    this.masterServices = masterServices;
    this.assignmentManager = masterServices.getAssignmentManager();
    this.tableLockManager = masterServices.getTableLockManager();

    // The write lock is only created here; it is acquired in prepare() and
    // released when the handler completes (or if prepare() fails).
    this.tableLock = this.tableLockManager.writeLock(this.hTableDescriptor.getTableName(),
        EventType.C_M_CREATE_TABLE.toString());
  }

  @Override
  public CreateTableHandler prepare()
      throws NotAllMetaRegionsOnlineException, TableExistsException, IOException {
    int timeout = conf.getInt("hbase.client.catalog.timeout", 10000);
    // Need hbase:meta availability to create a table
    try {
      if (server.getMetaTableLocator().waitMetaRegionLocation(
          server.getZooKeeper(), timeout) == null) {
        throw new NotAllMetaRegionsOnlineException();
      }
      // If we are creating the table in service to an RPC request, record the
      // active user for later, so proper permissions will be applied to the
      // new table by the AccessController if it is active
      if (RequestContext.isInRequestContext()) {
        this.activeUser = RequestContext.getRequestUser();
      } else {
        this.activeUser = UserProvider.instantiate(conf).getCurrent();
      }
    } catch (InterruptedException e) {
      LOG.warn("Interrupted waiting for meta availability", e);
      InterruptedIOException ie = new InterruptedIOException(e.getMessage());
      ie.initCause(e);
      throw ie;
    }

    // Acquire the table write lock, blocking. Make sure that it is released.
    this.tableLock.acquire();
    boolean success = false;
    try {
      TableName tableName = this.hTableDescriptor.getTableName();
      if (MetaTableAccessor.tableExists(this.server.getConnection(), tableName)) {
        throw new TableExistsException(tableName);
      }
      success = true;
    } finally {
      if (!success) {
        releaseTableLock();
      }
    }
    return this;
  }

  @Override
  public String toString() {
    String name = "UnknownServerName";
    if (server != null && server.getServerName() != null) {
      name = server.getServerName().toString();
    }
    return getClass().getSimpleName() + "-" + name + "-" + getSeqid() + "-" +
      this.hTableDescriptor.getTableName();
  }

  @Override
  public void process() {
    TableName tableName = this.hTableDescriptor.getTableName();
    LOG.info("Create table " + tableName);
    HMaster master = ((HMaster) this.server);
    try {
      final MasterCoprocessorHost cpHost = master.getMasterCoprocessorHost();
      if (cpHost != null) {
        cpHost.preCreateTableHandler(this.hTableDescriptor, this.newRegions);
      }
      handleCreateTable(tableName);
      completed(null);
      if (cpHost != null) {
        // Run the post-create hook as the user who requested the table, so that
        // coprocessors such as the AccessController see the correct caller.
        this.activeUser.runAs(new PrivilegedExceptionAction<Void>() {
          @Override
          public Void run() throws Exception {
            cpHost.postCreateTableHandler(hTableDescriptor, newRegions);
            return null;
          }
        });
      }
    } catch (Throwable e) {
      LOG.error("Error trying to create the table " + tableName, e);
      if (master.isInitialized()) {
        try {
          master.getMasterQuotaManager().removeTableFromNamespaceQuota(
            hTableDescriptor.getTableName());
        } catch (IOException e1) {
          LOG.error("Error trying to update namespace quota", e1);
        }
      }
      completed(e);
    }
  }

  /**
   * Called after process() completes.
   * @param exception null if process() was successful, non-null if something failed.
   */
  protected void completed(final Throwable exception) {
    releaseTableLock();
    LOG.info("Table, " + this.hTableDescriptor.getTableName() + ", creation " +
        (exception == null ? "successful" : "failed. " + exception));
  }

  /**
   * Responsible for table creation (on-disk and in META) and region assignment.
   * - Create the table directory and descriptor (temp folder)
   * - Create the on-disk regions (temp folder)
   *   [If something fails here: there is just some trash in the temp folder]
   * - Move the table from temp to the root directory
   *   [If something fails here: the table is in place but some of the required rows are
   *    missing from META (hbck needed)]
   * - Add regions to META
   *   [If something fails here: we don't have regions assigned: table disabled]
   * - Assign regions to Region Servers
   *   [If something fails here: we still have the table in disabled state]
   * - Set the table state to ENABLED
   */
  private void handleCreateTable(TableName tableName)
      throws IOException, CoordinatedStateException {
    Path tempdir = fileSystemManager.getTempDir();
    FileSystem fs = fileSystemManager.getFileSystem();

    // 1. Create Table Descriptor
    // Using a copy of the descriptor; the table will be created in ENABLING state first
    TableDescriptor underConstruction = new TableDescriptor(
        this.hTableDescriptor);
    Path tempTableDir = FSUtils.getTableDir(tempdir, tableName);
    ((FSTableDescriptors)(masterServices.getTableDescriptors()))
        .createTableDescriptorForTableDirectory(
        tempTableDir, underConstruction, false);
    Path tableDir = FSUtils.getTableDir(fileSystemManager.getRootDir(), tableName);

    // 2. Create Regions
    List<HRegionInfo> regionInfos = handleCreateHdfsRegions(tempdir, tableName);
    // 3. Move Table temp directory to the hbase root location
    if (!fs.rename(tempTableDir, tableDir)) {
      throw new IOException("Unable to move table from temp=" + tempTableDir +
        " to hbase root=" + tableDir);
    }

    // Populate the descriptors cache so the new table is visible in getAll()
    masterServices.getTableDescriptors().get(tableName);

    MetaTableAccessor.updateTableState(this.server.getConnection(), hTableDescriptor.getTableName(),
        TableState.State.ENABLING);

    if (regionInfos != null && regionInfos.size() > 0) {
      // 4. Add regions to META
      addRegionsToMeta(regionInfos, hTableDescriptor.getRegionReplication());
      // 5. Add replicas if needed
      regionInfos = addReplicas(hTableDescriptor, regionInfos);

      // 6. Setup replication for region replicas if needed
      if (hTableDescriptor.getRegionReplication() > 1) {
        ServerRegionReplicaUtil.setupRegionReplicaReplication(conf);
      }

      // 7. Trigger immediate assignment of the regions in round-robin fashion
      ModifyRegionUtils.assignRegions(assignmentManager, regionInfos);
    }

    // 8. Enable table
    assignmentManager.getTableStateManager().setTableState(tableName,
        TableState.State.ENABLED);

    // 9. Update the table descriptor cache.
    ((HMaster) this.server).getTableDescriptors().get(tableName);
  }

  /**
   * Create any replicas for the regions (the default replicas that were
   * already created are passed in to the method).
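   * <p>
   * For example, with two default regions and a region replication of 3, the returned list
   * contains the two default regions plus a copy of each with replica id 1 and replica id 2
   * (built via {@link RegionReplicaUtil#getRegionInfoForReplica}).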
   * @param hTableDescriptor descriptor to use
   * @param regions default replicas
   * @return the combined list of default and non-default replicas
   */
  protected List<HRegionInfo> addReplicas(HTableDescriptor hTableDescriptor,
      List<HRegionInfo> regions) {
    int numRegionReplicas = hTableDescriptor.getRegionReplication() - 1;
    if (numRegionReplicas <= 0) {
      return regions;
    }
    List<HRegionInfo> hRegionInfos =
        new ArrayList<HRegionInfo>((numRegionReplicas + 1) * regions.size());
    for (int i = 0; i < regions.size(); i++) {
      for (int j = 1; j <= numRegionReplicas; j++) {
        hRegionInfos.add(RegionReplicaUtil.getRegionInfoForReplica(regions.get(i), j));
      }
    }
    hRegionInfos.addAll(regions);
    return hRegionInfos;
  }

  private void releaseTableLock() {
    if (this.tableLock != null) {
      try {
        this.tableLock.release();
      } catch (IOException ex) {
        LOG.warn("Could not release the table lock", ex);
      }
    }
  }

  /**
   * Create the on-disk structure for the table and return the region info.
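   * <p>
   * Subclasses can override this hook to lay out regions differently, for example to
   * populate them from existing data instead of creating empty regions.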
   * @param tableRootDir directory where the table is being created
   * @param tableName name of the table under construction
   * @return the list of regions created
   */
  protected List<HRegionInfo> handleCreateHdfsRegions(final Path tableRootDir,
      final TableName tableName) throws IOException {
    return ModifyRegionUtils.createRegions(conf, tableRootDir,
        hTableDescriptor, newRegions, null);
  }

  /**
   * Add the specified set of regions to the hbase:meta table.
   */
  protected void addRegionsToMeta(final List<HRegionInfo> regionInfos, int regionReplication)
      throws IOException {
    MetaTableAccessor.addRegionsToMeta(this.server.getConnection(), regionInfos,
        regionReplication);
  }
}