/**
 *
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.hadoop.hbase.util;

import java.io.IOException;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.hbase.classification.InterfaceAudience;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.conf.Configured;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseInterfaceAudience;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.MasterNotRunningException;
import org.apache.hadoop.hbase.ZooKeeperConnectionException;
import org.apache.hadoop.hbase.client.Delete;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.HBaseAdmin;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.regionserver.HRegion;
import org.apache.hadoop.hbase.wal.WAL;
import org.apache.hadoop.io.WritableComparator;
import org.apache.hadoop.util.GenericOptionsParser;
import org.apache.hadoop.util.Tool;
import org.apache.hadoop.util.ToolRunner;

import com.google.common.base.Preconditions;

/**
 * Utility that can merge any two regions in the same table: adjacent,
 * overlapping or disjoint.
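 * <p>
 * For illustration, an invocation generally follows the form printed by the
 * usage message; the placeholders stand for a real table name and two of its
 * region names, and the HBase cluster must be shut down before running it:
 * <pre>
 * bin/hbase org.apache.hadoop.hbase.util.Merge &lt;table-name&gt; &lt;region-1&gt; &lt;region-2&gt;
 * </pre>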
 */
@InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.TOOLS)
public class Merge extends Configured implements Tool {
  private static final Log LOG = LogFactory.getLog(Merge.class);
  private Path rootdir;
  private volatile MetaUtils utils;
  private TableName tableName;               // Name of table
  private volatile byte [] region1;        // Name of region 1
  private volatile byte [] region2;        // Name of region 2
  private volatile HRegionInfo mergeInfo;

  /** default constructor */
  public Merge() {
    super();
  }

  /**
   * @param conf configuration
   */
  public Merge(Configuration conf) {
    this.mergeInfo = null;
    setConf(conf);
  }

  @Override
  public int run(String[] args) throws Exception {
    if (parseArgs(args) != 0) {
      return -1;
    }

    // Verify file system is up.
    FileSystem fs = FileSystem.get(getConf());              // get DFS handle
    LOG.info("Verifying that file system is available...");
    try {
      FSUtils.checkFileSystemAvailable(fs);
    } catch (IOException e) {
      LOG.fatal("File system is not available", e);
      return -1;
    }

    // Verify HBase is down
    LOG.info("Verifying that HBase is not running...");
    try {
      HBaseAdmin.checkHBaseAvailable(getConf());
      LOG.fatal("HBase cluster must be off-line, and is not. Aborting.");
      return -1;
    } catch (ZooKeeperConnectionException zkce) {
      // If no zk, presume no master.
    } catch (MasterNotRunningException e) {
      // Expected. Ignore.
    }

    // Initialize MetaUtils and get the root of the HBase installation

    this.utils = new MetaUtils(getConf());
    this.rootdir = FSUtils.getRootDir(getConf());
    try {
      mergeTwoRegions();
      return 0;
    } catch (IOException e) {
      LOG.fatal("Merge failed", e);
      return -1;

    } finally {
      if (this.utils != null) {
        this.utils.shutdown();
      }
    }
  }

  /** @return HRegionInfo for merge result */
  HRegionInfo getMergedHRegionInfo() {
    return this.mergeInfo;
  }

  /*
   * Merges two regions from a user table.
   */
  private void mergeTwoRegions() throws IOException {
    LOG.info("Merging regions " + Bytes.toStringBinary(this.region1) + " and " +
        Bytes.toStringBinary(this.region2) + " in table " + this.tableName);
    HRegion meta = this.utils.getMetaRegion();
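    // Look up each region's HRegionInfo in the info:regioninfo column of the
    // catalog (hbase:meta) region before doing the actual merge.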
    Get get = new Get(region1);
    get.addColumn(HConstants.CATALOG_FAMILY, HConstants.REGIONINFO_QUALIFIER);
    Result result1 = meta.get(get);
    Preconditions.checkState(!result1.isEmpty(),
        "First region's cells cannot be empty");
    HRegionInfo info1 = HRegionInfo.getHRegionInfo(result1);
    if (info1 == null) {
      throw new NullPointerException("info1 is null using key " +
          Bytes.toStringBinary(region1) + " in " + meta);
    }
    get = new Get(region2);
    get.addColumn(HConstants.CATALOG_FAMILY, HConstants.REGIONINFO_QUALIFIER);
    Result result2 = meta.get(get);
    Preconditions.checkState(!result2.isEmpty(),
        "Second region's cells cannot be empty");
    HRegionInfo info2 = HRegionInfo.getHRegionInfo(result2);
    if (info2 == null) {
      throw new NullPointerException("info2 is null using key " + meta);
    }
    HTableDescriptor htd = FSTableDescriptors.getTableDescriptorFromFs(FileSystem.get(getConf()),
      this.rootdir, this.tableName);
    HRegion merged = merge(htd, meta, info1, info2);

    LOG.info("Adding " + merged.getRegionInfo() + " to " +
        meta.getRegionInfo());

    HRegion.addRegionToMETA(meta, merged);
    merged.close();
  }

  /*
   * Actually merge two regions and update their info in the meta region(s)
   * Returns HRegion object for newly merged region
   */
  private HRegion merge(final HTableDescriptor htd, HRegion meta,
                        HRegionInfo info1, HRegionInfo info2)
  throws IOException {
    if (info1 == null) {
      throw new IOException("Could not find " + Bytes.toStringBinary(region1) + " in " +
          Bytes.toStringBinary(meta.getRegionInfo().getRegionName()));
    }
    if (info2 == null) {
      throw new IOException("Could not find " + Bytes.toStringBinary(region2) + " in " +
          Bytes.toStringBinary(meta.getRegionInfo().getRegionName()));
    }
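    // Open both source regions and merge them; the nested try/finally blocks
    // make sure each opened region is closed again even if the merge fails.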
    HRegion merged = null;
    HRegion r1 = HRegion.openHRegion(info1, htd, utils.getLog(info1), getConf());
    try {
      HRegion r2 = HRegion.openHRegion(info2, htd, utils.getLog(info2), getConf());
      try {
        merged = HRegion.merge(r1, r2);
      } finally {
        if (!r2.isClosed()) {
          r2.close();
        }
      }
    } finally {
      if (!r1.isClosed()) {
        r1.close();
      }
    }

    // Remove the old regions from meta.
    // HRegion.merge has already deleted their files

    removeRegionFromMeta(meta, info1);
    removeRegionFromMeta(meta, info2);

    this.mergeInfo = merged.getRegionInfo();
    return merged;
  }

  /*
   * Removes a region's meta information from the passed <code>meta</code>
   * region.
   *
   * @param meta hbase:meta HRegion to be updated
   * @param regioninfo HRegionInfo of region to remove from <code>meta</code>
   *
   * @throws IOException
   */
  private void removeRegionFromMeta(HRegion meta, HRegionInfo regioninfo)
  throws IOException {
    if (LOG.isDebugEnabled()) {
      LOG.debug("Removing region: " + regioninfo + " from " + meta);
    }

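    // A Delete built from a row and a timestamp removes every cell in that row
    // with a timestamp at or before the given one, i.e. the region's entire
    // catalog row as of now.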
    Delete delete = new Delete(regioninfo.getRegionName(),
        System.currentTimeMillis());
    meta.delete(delete);
  }

  /*
   * Parse given arguments including generic arguments and assign table name and region names.
   *
   * @param args the arguments to parse
   *
   * @throws IOException
   */
  private int parseArgs(String[] args) throws IOException {
    GenericOptionsParser parser =
      new GenericOptionsParser(getConf(), args);

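    // GenericOptionsParser has already consumed the generic Hadoop options
    // (e.g. -D key=value); exactly three positional arguments should remain:
    // the table name and the two region names.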
    String[] remainingArgs = parser.getRemainingArgs();
    if (remainingArgs.length != 3) {
      usage();
      return -1;
    }
    tableName = TableName.valueOf(remainingArgs[0]);

    region1 = Bytes.toBytesBinary(remainingArgs[1]);
    region2 = Bytes.toBytesBinary(remainingArgs[2]);
    int status = 0;
    if (notInTable(tableName, region1) || notInTable(tableName, region2)) {
      status = -1;
    } else if (Bytes.equals(region1, region2)) {
      LOG.error("Can't merge a region with itself");
      status = -1;
    }
    return status;
  }

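  /*
   * Region names are prefixed with the name of the table they belong to, so a
   * region can only belong to <code>tn</code> if its name starts with
   * <code>tn</code>'s bytes.
   */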
  private boolean notInTable(final TableName tn, final byte [] rn) {
    if (WritableComparator.compareBytes(tn.getName(), 0, tn.getName().length,
        rn, 0, tn.getName().length) != 0) {
      LOG.error("Region " + Bytes.toStringBinary(rn) + " does not belong to table " +
        tn);
      return true;
    }
    return false;
  }

  private void usage() {
    System.err
        .println("For hadoop 0.21+, Usage: bin/hbase org.apache.hadoop.hbase.util.Merge "
            + "[-Dfs.defaultFS=hdfs://nn:port] <table-name> <region-1> <region-2>\n");
  }

  public static void main(String[] args) {
    int status;
    try {
      status = ToolRunner.run(HBaseConfiguration.create(), new Merge(), args);
    } catch (Exception e) {
      LOG.error("exiting due to error", e);
      status = -1;
    }
    System.exit(status);
  }
}