/**
 * Copyright 2010 The Apache Software Foundation
 *
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.hadoop.hbase.util;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.conf.Configured;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.MasterNotRunningException;
import org.apache.hadoop.hbase.ZooKeeperConnectionException;
import org.apache.hadoop.hbase.client.Delete;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.HBaseAdmin;
import org.apache.hadoop.hbase.regionserver.HRegion;
import org.apache.hadoop.hbase.regionserver.wal.HLog;
import org.apache.hadoop.io.WritableComparator;
import org.apache.hadoop.util.GenericOptionsParser;
import org.apache.hadoop.util.Tool;
import org.apache.hadoop.util.ToolRunner;

import java.io.IOException;
import java.util.List;

/**
 * Utility that can merge any two regions in the same table: adjacent,
 * overlapping or disjoint.
 */
public class Merge extends Configured implements Tool {
  static final Log LOG = LogFactory.getLog(Merge.class);
  private Path rootdir;
  private volatile MetaUtils utils;
  private byte [] tableName;               // Name of table
  private volatile byte [] region1;        // Name of region 1
  private volatile byte [] region2;        // Name of region 2
  private volatile boolean isMetaTable;
  private volatile HRegionInfo mergeInfo;

  /** default constructor */
  public Merge() {
    super();
  }

  /**
   * @param conf configuration
   */
  public Merge(Configuration conf) {
    this.mergeInfo = null;
    setConf(conf);
  }

  public int run(String[] args) throws Exception {
    if (parseArgs(args) != 0) {
      return -1;
    }

    // Verify file system is up.
    FileSystem fs = FileSystem.get(getConf());              // get DFS handle
    LOG.info("Verifying that file system is available...");
    try {
      FSUtils.checkFileSystemAvailable(fs);
    } catch (IOException e) {
      LOG.fatal("File system is not available", e);
      return -1;
    }

    // Verify HBase is down
    LOG.info("Verifying that HBase is not running...");
    try {
      HBaseAdmin.checkHBaseAvailable(getConf());
      LOG.fatal("HBase cluster must be off-line.");
      return -1;
    } catch (ZooKeeperConnectionException zkce) {
      // If no zk, presume no master.
    } catch (MasterNotRunningException e) {
      // Expected. Ignore.
    }

    // Initialize MetaUtils and get the root of the HBase installation

    this.utils = new MetaUtils(getConf());
    this.rootdir = FSUtils.getRootDir(getConf());
    try {
      if (isMetaTable) {
        mergeTwoMetaRegions();
      } else {
        mergeTwoRegions();
      }
      return 0;
    } catch (Exception e) {
      LOG.fatal("Merge failed", e);
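      // On failure, dump every row of the first .META. region to stderr so the
      // current region layout can be inspected when diagnosing the failed merge.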
      utils.scanMetaRegion(HRegionInfo.FIRST_META_REGIONINFO,
          new MetaUtils.ScannerListener() {
            public boolean processRow(HRegionInfo info) {
              System.err.println(info.toString());
              return true;
            }
          }
      );

      return -1;

    } finally {
      if (this.utils != null) {
        this.utils.shutdown();
      }
    }
  }

  /** @return HRegionInfo for merge result */
  HRegionInfo getMergedHRegionInfo() {
    return this.mergeInfo;
  }

  /*
   * Merge two meta regions. This is unlikely to be needed soon as we have only
   * seen the meta table split once and that was with 64MB regions. With 256MB
   * regions, it will be some time before someone has enough data in HBase to
   * split the meta region, and it is even less likely that a merge of two meta
   * regions will be needed, but it is included for completeness.
   */
  private void mergeTwoMetaRegions() throws IOException {
    HRegion rootRegion = utils.getRootRegion();
    Get get = new Get(region1);
    get.addColumn(HConstants.CATALOG_FAMILY, HConstants.REGIONINFO_QUALIFIER);
    List<KeyValue> cells1 = rootRegion.get(get, null).list();
    HRegionInfo info1 = Writables.getHRegionInfo((cells1 == null)? null: cells1.get(0).getValue());

    get = new Get(region2);
    get.addColumn(HConstants.CATALOG_FAMILY, HConstants.REGIONINFO_QUALIFIER);
    List<KeyValue> cells2 = rootRegion.get(get, null).list();
    HRegionInfo info2 = Writables.getHRegionInfo((cells2 == null)? null: cells2.get(0).getValue());
    HRegion merged = merge(HTableDescriptor.META_TABLEDESC, info1, rootRegion, info2, rootRegion);
    LOG.info("Adding " + merged.getRegionInfo() + " to " +
        rootRegion.getRegionInfo());
    HRegion.addRegionToMETA(rootRegion, merged);
    merged.close();
  }

  private static class MetaScannerListener
  implements MetaUtils.ScannerListener {
    private final byte [] region1;
    private final byte [] region2;
    private HRegionInfo meta1 = null;
    private HRegionInfo meta2 = null;

    MetaScannerListener(final byte [] region1, final byte [] region2) {
      this.region1 = region1;
      this.region2 = region2;
    }

    public boolean processRow(HRegionInfo info) {
      if (meta1 == null && HRegion.rowIsInRange(info, region1)) {
        meta1 = info;
      }
      if (region2 != null && meta2 == null &&
          HRegion.rowIsInRange(info, region2)) {
        meta2 = info;
      }
      return meta1 == null || (region2 != null && meta2 == null);
    }

    HRegionInfo getMeta1() {
      return meta1;
    }

    HRegionInfo getMeta2() {
      return meta2;
    }
  }

  /*
   * Merges two regions from a user table.
   */
  private void mergeTwoRegions() throws IOException {
    LOG.info("Merging regions " + Bytes.toStringBinary(this.region1) + " and " +
        Bytes.toStringBinary(this.region2) + " in table " + Bytes.toString(this.tableName));
    // Scan the root region for all the meta regions that contain the regions
    // we're merging.
    MetaScannerListener listener = new MetaScannerListener(region1, region2);
    this.utils.scanRootRegion(listener);
    HRegionInfo meta1 = listener.getMeta1();
    if (meta1 == null) {
      throw new IOException("Could not find meta region for " + Bytes.toStringBinary(region1));
    }
    HRegionInfo meta2 = listener.getMeta2();
    if (meta2 == null) {
      throw new IOException("Could not find meta region for " + Bytes.toStringBinary(region2));
    }
    LOG.info("Found meta for region1 " + Bytes.toStringBinary(meta1.getRegionName()) +
      ", meta for region2 " + Bytes.toStringBinary(meta2.getRegionName()));
    HRegion metaRegion1 = this.utils.getMetaRegion(meta1);
    Get get = new Get(region1);
    get.addColumn(HConstants.CATALOG_FAMILY, HConstants.REGIONINFO_QUALIFIER);
    List<KeyValue> cells1 = metaRegion1.get(get, null).list();
    HRegionInfo info1 =
      Writables.getHRegionInfo((cells1 == null)? null: cells1.get(0).getValue());
    if (info1 == null) {
      throw new NullPointerException("info1 is null using key " +
          Bytes.toStringBinary(region1) + " in " + meta1);
    }

    HRegion metaRegion2;
    if (Bytes.equals(meta1.getRegionName(), meta2.getRegionName())) {
      metaRegion2 = metaRegion1;
    } else {
      metaRegion2 = utils.getMetaRegion(meta2);
    }
    get = new Get(region2);
    get.addColumn(HConstants.CATALOG_FAMILY, HConstants.REGIONINFO_QUALIFIER);
    List<KeyValue> cells2 = metaRegion2.get(get, null).list();
    HRegionInfo info2 = Writables.getHRegionInfo((cells2 == null)? null: cells2.get(0).getValue());
    if (info2 == null) {
      throw new NullPointerException("info2 is null using key " + meta2);
    }
    HTableDescriptor htd = FSTableDescriptors.getTableDescriptor(FileSystem.get(getConf()),
      this.rootdir, this.tableName);
    HRegion merged = merge(htd, info1, metaRegion1, info2, metaRegion2);

    // Now find the meta region which will contain the newly merged region

    listener = new MetaScannerListener(merged.getRegionName(), null);
    utils.scanRootRegion(listener);
    HRegionInfo mergedInfo = listener.getMeta1();
    if (mergedInfo == null) {
      throw new IOException("Could not find meta region for " +
          Bytes.toStringBinary(merged.getRegionName()));
    }
    HRegion mergeMeta;
    if (Bytes.equals(mergedInfo.getRegionName(), meta1.getRegionName())) {
      mergeMeta = metaRegion1;
    } else if (Bytes.equals(mergedInfo.getRegionName(), meta2.getRegionName())) {
      mergeMeta = metaRegion2;
    } else {
      mergeMeta = utils.getMetaRegion(mergedInfo);
    }
    LOG.info("Adding " + merged.getRegionInfo() + " to " +
        mergeMeta.getRegionInfo());

    HRegion.addRegionToMETA(mergeMeta, merged);
    merged.close();
  }

  /*
   * Actually merges the two regions and updates their info in the meta
   * region(s). If the meta table is split, meta1 may be different from meta2
   * (and we may have to scan the meta if the resulting merged region does not
   * go in either).
   * Returns the HRegion object for the newly merged region.
   */
  private HRegion merge(final HTableDescriptor htd, HRegionInfo info1,
      HRegion meta1, HRegionInfo info2, HRegion meta2)
  throws IOException {
    if (info1 == null) {
      throw new IOException("Could not find " + Bytes.toStringBinary(region1) + " in " +
          Bytes.toStringBinary(meta1.getRegionName()));
    }
    if (info2 == null) {
      throw new IOException("Could not find " + Bytes.toStringBinary(region2) + " in " +
          Bytes.toStringBinary(meta2.getRegionName()));
    }
    HRegion merged = null;
    HLog log = utils.getLog();
    HRegion r1 = HRegion.openHRegion(info1, htd, log, getConf());
    try {
      HRegion r2 = HRegion.openHRegion(info2, htd, log, getConf());
      try {
        merged = HRegion.merge(r1, r2);
      } finally {
        if (!r2.isClosed()) {
          r2.close();
        }
      }
    } finally {
      if (!r1.isClosed()) {
        r1.close();
      }
    }

    // Remove the old regions from meta.
    // HRegion.merge has already deleted their files

    removeRegionFromMeta(meta1, info1);
    removeRegionFromMeta(meta2, info2);

    this.mergeInfo = merged.getRegionInfo();
    return merged;
  }

  /*
   * Removes a region's meta information from the passed <code>meta</code>
   * region.
   *
   * @param meta META HRegion to be updated
   * @param regioninfo HRegionInfo of region to remove from <code>meta</code>
   *
   * @throws IOException
   */
  private void removeRegionFromMeta(HRegion meta, HRegionInfo regioninfo)
  throws IOException {
    if (LOG.isDebugEnabled()) {
      LOG.debug("Removing region: " + regioninfo + " from " + meta);
    }

    Delete delete = new Delete(regioninfo.getRegionName(),
        System.currentTimeMillis(), null);
    meta.delete(delete, null, true);
  }

  /*
   * Parses the command-line arguments: the table name followed by the names
   * of the two regions to merge.
   *
   * @param args command-line arguments
   * @return 0 if the arguments are valid, -1 otherwise
   *
   * @throws IOException
   */
  private int parseArgs(String[] args) throws IOException {
    GenericOptionsParser parser =
      new GenericOptionsParser(getConf(), args);

    String[] remainingArgs = parser.getRemainingArgs();
    if (remainingArgs.length != 3) {
      usage();
      return -1;
    }
    tableName = Bytes.toBytes(remainingArgs[0]);
    isMetaTable = Bytes.compareTo(tableName, HConstants.META_TABLE_NAME) == 0;

    region1 = Bytes.toBytesBinary(remainingArgs[1]);
    region2 = Bytes.toBytesBinary(remainingArgs[2]);
    int status = 0;
    if (notInTable(tableName, region1) || notInTable(tableName, region2)) {
      status = -1;
    } else if (Bytes.equals(region1, region2)) {
      LOG.error("Can't merge a region with itself");
      status = -1;
    }
    return status;
  }

  private boolean notInTable(final byte [] tn, final byte [] rn) {
    if (WritableComparator.compareBytes(tn, 0, tn.length, rn, 0, tn.length) != 0) {
      LOG.error("Region " + Bytes.toStringBinary(rn) + " does not belong to table " +
        Bytes.toString(tn));
      return true;
    }
    return false;
  }

  private void usage() {
    System.err
        .println("For hadoop 0.20,  Usage: bin/hbase org.apache.hadoop.hbase.util.Merge "
            + "[-Dfs.default.name=hdfs://nn:port] <table-name> <region-1> <region-2>\n");
    System.err
        .println("For hadoop 0.21+, Usage: bin/hbase org.apache.hadoop.hbase.util.Merge "
            + "[-Dfs.defaultFS=hdfs://nn:port] <table-name> <region-1> <region-2>\n");
  }
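  // A sketch of a typical invocation, with hypothetical table and region
  // names; pass the full region names exactly as they appear in the .META.
  // table, and shut the HBase cluster down first:
  //
  //   bin/hbase org.apache.hadoop.hbase.util.Merge \
  //       -Dfs.defaultFS=hdfs://nn:8020 \
  //       mytable <full-name-of-region-1> <full-name-of-region-2>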

  public static void main(String[] args) {
    int status;
    try {
      status = ToolRunner.run(HBaseConfiguration.create(), new Merge(), args);
    } catch (Exception e) {
      LOG.error("exiting due to error", e);
      status = -1;
    }
    System.exit(status);
  }
}