/**
 *
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.hadoop.hbase.util;

import java.io.IOException;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.hbase.classification.InterfaceAudience;
import org.apache.hadoop.hbase.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.conf.Configured;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.TableDescriptor;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.MasterNotRunningException;
import org.apache.hadoop.hbase.ZooKeeperConnectionException;
import org.apache.hadoop.hbase.client.Delete;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.HBaseAdmin;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.regionserver.HRegion;
import org.apache.hadoop.hbase.regionserver.wal.HLog;
import org.apache.hadoop.io.WritableComparator;
import org.apache.hadoop.util.GenericOptionsParser;
import org.apache.hadoop.util.Tool;
import org.apache.hadoop.util.ToolRunner;

import com.google.common.base.Preconditions;

/**
 * Utility that can merge any two regions in the same table: adjacent,
 * overlapping or disjoint.
 */
@InterfaceAudience.Public
@InterfaceStability.Evolving
public class Merge extends Configured implements Tool {
  static final Log LOG = LogFactory.getLog(Merge.class);
  private Path rootdir;
  private volatile MetaUtils utils;
  private TableName tableName;               // Name of table
  private volatile byte [] region1;        // Name of region 1
  private volatile byte [] region2;        // Name of region 2
  private volatile HRegionInfo mergeInfo;

  /** default constructor */
  public Merge() {
    super();
  }

  /**
   * @param conf configuration
   */
  public Merge(Configuration conf) {
    this.mergeInfo = null;
    setConf(conf);
  }
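
  /**
   * Runs the merge. Verifies that the underlying file system is available
   * and that the HBase cluster is offline, merges the two regions named on
   * the command line, and shuts down the MetaUtils instance it opened.
   * @param args generic Hadoop options followed by the table name and the
   *   names of the two regions to merge
   * @return 0 on success, -1 on failure
   */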
  public int run(String[] args) throws Exception {
    if (parseArgs(args) != 0) {
      return -1;
    }

    // Verify file system is up.
    FileSystem fs = FileSystem.get(getConf()); // get DFS handle
    LOG.info("Verifying that file system is available...");
    try {
      FSUtils.checkFileSystemAvailable(fs);
    } catch (IOException e) {
      LOG.fatal("File system is not available", e);
      return -1;
    }

    // Verify HBase is down
    LOG.info("Verifying that HBase is not running...");
    try {
      HBaseAdmin.checkHBaseAvailable(getConf());
      LOG.fatal("HBase cluster must be off-line, and is not. Aborting.");
      return -1;
    } catch (ZooKeeperConnectionException zkce) {
      // If no zk, presume no master.
    } catch (MasterNotRunningException e) {
      // Expected. Ignore.
    }

    // Initialize MetaUtils and get the root of the HBase installation
    this.utils = new MetaUtils(getConf());
    this.rootdir = FSUtils.getRootDir(getConf());
    try {
      mergeTwoRegions();
      return 0;
    } catch (IOException e) {
      LOG.fatal("Merge failed", e);
      return -1;
    } finally {
      if (this.utils != null) {
        this.utils.shutdown();
      }
    }
  }

  /** @return HRegionInfo for merge result */
  HRegionInfo getMergedHRegionInfo() {
    return this.mergeInfo;
  }

  /*
   * Merges two regions from a user table.
   */
  private void mergeTwoRegions() throws IOException {
    LOG.info("Merging regions " + Bytes.toStringBinary(this.region1) + " and " +
        Bytes.toStringBinary(this.region2) + " in table " + this.tableName);
    HRegion meta = this.utils.getMetaRegion();
    Get get = new Get(region1);
    get.addColumn(HConstants.CATALOG_FAMILY, HConstants.REGIONINFO_QUALIFIER);
    Result result1 = meta.get(get);
    Preconditions.checkState(!result1.isEmpty(),
        "First region cells can not be empty");
    HRegionInfo info1 = HRegionInfo.getHRegionInfo(result1);
    if (info1 == null) {
      throw new NullPointerException("info1 is null using key " +
          Bytes.toStringBinary(region1) + " in " + meta);
    }
    get = new Get(region2);
    get.addColumn(HConstants.CATALOG_FAMILY, HConstants.REGIONINFO_QUALIFIER);
    Result result2 = meta.get(get);
    Preconditions.checkState(!result2.isEmpty(),
        "Second region cells can not be empty");
    HRegionInfo info2 = HRegionInfo.getHRegionInfo(result2);
    if (info2 == null) {
      throw new NullPointerException("info2 is null using key " +
          Bytes.toStringBinary(region2) + " in " + meta);
    }
    TableDescriptor htd = FSTableDescriptors.getTableDescriptorFromFs(FileSystem.get(getConf()),
      this.rootdir, this.tableName);
    HRegion merged = merge(htd.getHTableDescriptor(), meta, info1, info2);

    LOG.info("Adding " + merged.getRegionInfo() + " to " +
        meta.getRegionInfo());

    HRegion.addRegionToMETA(meta, merged);
    merged.close();
  }

  /*
   * Actually merge two regions and update their info in the meta region(s).
   * Returns the HRegion object for the newly merged region.
   */
  private HRegion merge(final HTableDescriptor htd, HRegion meta,
                        HRegionInfo info1, HRegionInfo info2)
  throws IOException {
    if (info1 == null) {
      throw new IOException("Could not find " + Bytes.toStringBinary(region1) + " in " +
          Bytes.toStringBinary(meta.getRegionName()));
    }
    if (info2 == null) {
      throw new IOException("Could not find " + Bytes.toStringBinary(region2) + " in " +
          Bytes.toStringBinary(meta.getRegionName()));
    }
    HRegion merged = null;
    HLog log = utils.getLog();
    HRegion r1 = HRegion.openHRegion(info1, htd, log, getConf());
    try {
      HRegion r2 = HRegion.openHRegion(info2, htd, log, getConf());
      try {
        merged = HRegion.merge(r1, r2);
      } finally {
        if (!r2.isClosed()) {
          r2.close();
        }
      }
    } finally {
      if (!r1.isClosed()) {
        r1.close();
      }
    }

    // Remove the old regions from meta.
    // HRegion.merge has already deleted their files.
    removeRegionFromMeta(meta, info1);
    removeRegionFromMeta(meta, info2);

    this.mergeInfo = merged.getRegionInfo();
    return merged;
  }

  /*
   * Removes a region's meta information from the passed <code>meta</code>
   * region.
   *
   * @param meta hbase:meta HRegion to be updated
   * @param regioninfo HRegionInfo of region to remove from <code>meta</code>
   *
   * @throws IOException
   */
  private void removeRegionFromMeta(HRegion meta, HRegionInfo regioninfo)
  throws IOException {
    if (LOG.isDebugEnabled()) {
      LOG.debug("Removing region: " + regioninfo + " from " + meta);
    }

    Delete delete = new Delete(regioninfo.getRegionName(),
        System.currentTimeMillis());
    meta.delete(delete);
  }

  /*
   * Parse given arguments including generic arguments and assign table name and region names.
   *
   * @param args the arguments to parse
   *
   * @throws IOException
   */
  private int parseArgs(String[] args) throws IOException {
    GenericOptionsParser parser =
      new GenericOptionsParser(getConf(), args);

    String[] remainingArgs = parser.getRemainingArgs();
    if (remainingArgs.length != 3) {
      usage();
      return -1;
    }
    tableName = TableName.valueOf(remainingArgs[0]);

    region1 = Bytes.toBytesBinary(remainingArgs[1]);
    region2 = Bytes.toBytesBinary(remainingArgs[2]);
    int status = 0;
    if (notInTable(tableName, region1) || notInTable(tableName, region2)) {
      status = -1;
    } else if (Bytes.equals(region1, region2)) {
      LOG.error("Can't merge a region with itself");
      status = -1;
    }
    return status;
  }

  private boolean notInTable(final TableName tn, final byte [] rn) {
    if (WritableComparator.compareBytes(tn.getName(), 0, tn.getName().length,
        rn, 0, tn.getName().length) != 0) {
      LOG.error("Region " + Bytes.toStringBinary(rn) + " does not belong to table " +
        tn);
      return true;
    }
    return false;
  }

  private void usage() {
    System.err
        .println("For hadoop 0.21+, Usage: bin/hbase org.apache.hadoop.hbase.util.Merge "
            + "[-Dfs.defaultFS=hdfs://nn:port] <table-name> <region-1> <region-2>\n");
  }
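
  // Example invocation (a sketch, not output from a real run; the namenode
  // host/port is a placeholder, -Dfs.defaultFS is only needed when the
  // configuration does not already point at the right file system, and the
  // HBase cluster must be shut down before running the tool):
  //
  //   bin/hbase org.apache.hadoop.hbase.util.Merge \
  //       -Dfs.defaultFS=hdfs://namenode.example.com:8020 \
  //       <table-name> <region-1> <region-2>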

  public static void main(String[] args) {
    int status;
    try {
      status = ToolRunner.run(HBaseConfiguration.create(), new Merge(), args);
    } catch (Exception e) {
      LOG.error("exiting due to error", e);
      status = -1;
    }
    System.exit(status);
  }
}