
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.hadoop.hbase.master.procedure;

import java.io.IOException;
import java.util.ArrayList;
import java.util.LinkedList;
import java.util.List;
import java.util.NavigableMap;
import java.util.TreeMap;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.HRegionLocation;
import org.apache.hadoop.hbase.MetaTableAccessor;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.TableNotDisabledException;
import org.apache.hadoop.hbase.TableNotFoundException;
import org.apache.hadoop.hbase.classification.InterfaceAudience;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.RegionLocator;
import org.apache.hadoop.hbase.master.AssignmentManager;
import org.apache.hadoop.hbase.master.BulkReOpen;
import org.apache.hadoop.hbase.master.MasterFileSystem;
import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos;
import org.apache.hadoop.hbase.util.Bytes;

import com.google.common.collect.Lists;
import com.google.common.collect.Maps;

/**
 * Helper class for schema change procedures
 */
@InterfaceAudience.Private
public final class MasterDDLOperationHelper {
  private static final Log LOG = LogFactory.getLog(MasterDDLOperationHelper.class);

  private MasterDDLOperationHelper() {}

  /**
   * Check whether online schema change is allowed, as controlled by the
   * {@code hbase.online.schema.update.enable} configuration property (default: false).
   */
  public static boolean isOnlineSchemaChangeAllowed(final MasterProcedureEnv env) {
    return env.getMasterServices().getConfiguration()
        .getBoolean("hbase.online.schema.update.enable", false);
  }

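  /*
   * Illustrative note (not part of the original source): the property checked above is
   * set cluster-wide, for example in hbase-site.xml:
   *
   *   <property>
   *     <name>hbase.online.schema.update.enable</name>
   *     <value>true</value>
   *   </property>
   */
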
  /**
   * Check whether a table is modifiable: it must exist and be either disabled, or enabled
   * with online schema change allowed.
   * @param env MasterProcedureEnv
   * @param tableName name of the table
   * @throws TableNotFoundException if the table does not exist
   * @throws TableNotDisabledException if the table is enabled and online schema change is not allowed
   * @throws IOException if the table state cannot be determined
   */
  public static void checkTableModifiable(final MasterProcedureEnv env, final TableName tableName)
      throws IOException {
    // Check whether the table exists.
    if (!MetaTableAccessor.tableExists(env.getMasterServices().getConnection(), tableName)) {
      throw new TableNotFoundException(tableName);
    }

    // The table may stay online during this procedure only if online schema change is allowed.
    if (!env.getMasterServices().getAssignmentManager().getTableStateManager()
        .isTableState(tableName, ZooKeeperProtos.Table.State.DISABLED)
        && !MasterDDLOperationHelper.isOnlineSchemaChangeAllowed(env)) {
      throw new TableNotDisabledException(tableName);
    }
  }

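  /*
   * Usage sketch (illustrative only, not part of the original source): a DDL procedure would
   * typically validate the table before touching any state, e.g.
   *
   *   // throws TableNotFoundException or TableNotDisabledException on violation
   *   MasterDDLOperationHelper.checkTableModifiable(env, tableName);
   */
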
  /**
   * Remove the column family from the file system for every region of the table.
   */
  public static void deleteColumnFamilyFromFileSystem(
      final MasterProcedureEnv env,
      final TableName tableName,
      List<HRegionInfo> regionInfoList,
      final byte[] familyName) throws IOException {
    final MasterFileSystem mfs = env.getMasterServices().getMasterFileSystem();
    if (LOG.isDebugEnabled()) {
      LOG.debug("Removing family=" + Bytes.toString(familyName) + " from table=" + tableName);
    }
    if (regionInfoList == null) {
      regionInfoList = ProcedureSyncWait.getRegionsFromMeta(env, tableName);
    }
    // Delete the family directory in the file system for each region, one by one.
    for (HRegionInfo hri : regionInfoList) {
      mfs.deleteFamilyFromFS(hri, familyName);
    }
  }

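  /*
   * Usage sketch (illustrative only; "cf" is a hypothetical column family name): passing null
   * for the region list lets the helper look the regions up in hbase:meta itself.
   *
   *   byte[] family = Bytes.toBytes("cf");
   *   MasterDDLOperationHelper.deleteColumnFamilyFromFileSystem(env, tableName, null, family);
   */
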
  /**
   * Reopen all regions of a table after a schema change operation.
   * @return true once all regions have been reopened; false if the reopen was interrupted
   */
  public static boolean reOpenAllRegions(
      final MasterProcedureEnv env,
      final TableName tableName,
      final List<HRegionInfo> regionInfoList) throws IOException {
    boolean done = false;
    LOG.info("Bucketing regions by region server...");
    List<HRegionLocation> regionLocations = null;
    Connection connection = env.getMasterServices().getConnection();
    try (RegionLocator locator = connection.getRegionLocator(tableName)) {
      regionLocations = locator.getAllRegionLocations();
    }
    // Convert List<HRegionLocation> to Map<HRegionInfo, ServerName>.
    NavigableMap<HRegionInfo, ServerName> hri2Sn = new TreeMap<HRegionInfo, ServerName>();
    for (HRegionLocation location : regionLocations) {
      hri2Sn.put(location.getRegionInfo(), location.getServerName());
    }
    TreeMap<ServerName, List<HRegionInfo>> serverToRegions = Maps.newTreeMap();
    List<HRegionInfo> reRegions = new ArrayList<HRegionInfo>();
    for (HRegionInfo hri : regionInfoList) {
      ServerName sn = hri2Sn.get(hri);
      // Skip the offlined split parent region; see HBASE-4578 for more information.
      if (sn == null) {
        LOG.info("Skip " + hri);
        continue;
      }
      if (!serverToRegions.containsKey(sn)) {
        LinkedList<HRegionInfo> hriList = Lists.newLinkedList();
        serverToRegions.put(sn, hriList);
      }
      reRegions.add(hri);
      serverToRegions.get(sn).add(hri);
    }

    LOG.info("Reopening " + reRegions.size() + " regions on " + serverToRegions.size()
        + " region servers.");
    AssignmentManager am = env.getMasterServices().getAssignmentManager();
    am.setRegionsToReopen(reRegions);
    BulkReOpen bulkReopen = new BulkReOpen(env.getMasterServices(), serverToRegions, am);
    while (true) {
      try {
        if (bulkReopen.bulkReOpen()) {
          done = true;
          break;
        } else {
          LOG.warn("Timeout before reopening all regions");
        }
      } catch (InterruptedException e) {
        LOG.warn("Reopen was interrupted");
        // Preserve the interrupt.
        Thread.currentThread().interrupt();
        break;
      }
    }
    return done;
  }
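
  /*
   * Usage sketch (illustrative only): after a schema change, a DDL procedure can force the new
   * descriptor to take effect by reopening the table's regions and checking the outcome.
   *
   *   List<HRegionInfo> regions = ProcedureSyncWait.getRegionsFromMeta(env, tableName);
   *   if (!MasterDDLOperationHelper.reOpenAllRegions(env, tableName, regions)) {
   *     LOG.info("Not all regions of " + tableName + " were reopened; they will pick up the"
   *         + " new schema on their next reopen.");
   *   }
   */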
}