/**
 *
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.util;

import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import java.util.NoSuchElementException;
import java.util.Random;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.MetaTableAccessor;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.TableNotDisabledException;
import org.apache.hadoop.hbase.classification.InterfaceAudience;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ClusterConnection;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Delete;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.regionserver.HRegion;
import org.apache.hadoop.hbase.wal.WALFactory;
import org.apache.hadoop.ipc.RemoteException;

/**
 * A non-instantiable class that has a static method capable of compacting
 * a table by merging adjacent regions.
 */
@InterfaceAudience.Private
class HMerge {
  // TODO: Where is this class used?  How does it relate to Merge in same package?
  private static final Log LOG = LogFactory.getLog(HMerge.class);
  static final Random rand = new Random();

  /*
   * Not instantiable
   */
  private HMerge() {
    super();
  }

  /**
   * Scans the table and merges two adjacent regions if they are small. This
   * only happens when a lot of rows are deleted.
   *
   * When merging the hbase:meta region, the HBase instance must be offline.
   * When merging a normal table, the HBase instance must be online, but the
   * table must be disabled.
   *
   * @param conf        - configuration object for HBase
   * @param fs          - FileSystem where regions reside
   * @param tableName   - Table to be compacted
   * @throws IOException if the regions cannot be read or merged
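   *
   * <p>A minimal usage sketch (assumes a running, properly configured cluster
   * and that the table, whose name below is hypothetical, has already been
   * disabled):
   * <pre>{@code
   * Configuration conf = HBaseConfiguration.create();
   * FileSystem fs = FileSystem.get(conf);
   * HMerge.merge(conf, fs, TableName.valueOf("usertable"));
   * }</pre>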
   */
  public static void merge(Configuration conf, FileSystem fs,
    final TableName tableName)
  throws IOException {
    merge(conf, fs, tableName, true);
  }

  /**
   * Scans the table and merges two adjacent regions if they are small. This
   * only happens when a lot of rows are deleted.
   *
   * When merging the hbase:meta region, the HBase instance must be offline.
   * When merging a normal table, the HBase instance must be online, but the
   * table must be disabled.
   *
   * @param conf        - configuration object for HBase
   * @param fs          - FileSystem where regions reside
   * @param tableName   - Table to be compacted
   * @param testMasterRunning True if the master's state should be checked before
   * running the merge (the master must be offline to merge hbase:meta and online
   * to merge a normal table)
   * @throws IOException if the regions cannot be read or merged
   */
  public static void merge(Configuration conf, FileSystem fs,
    final TableName tableName, final boolean testMasterRunning)
  throws IOException {
    boolean masterIsRunning = false;
    ClusterConnection hConnection = null;
    if (testMasterRunning) {
      try {
        hConnection = (ClusterConnection) ConnectionFactory.createConnection(conf);
        masterIsRunning = hConnection.isMasterRunning();
      } finally {
        if (hConnection != null) {
          hConnection.close();
        }
      }
    }
    if (tableName.equals(TableName.META_TABLE_NAME)) {
      if (masterIsRunning) {
        throw new IllegalStateException(
            "Can not compact hbase:meta table if instance is on-line");
      }
      // TODO reenable new OfflineMerger(conf, fs).process();
    } else {
      if(!masterIsRunning) {
        throw new IllegalStateException(
            "HBase instance must be running to merge a normal table");
      }
      try (Connection conn = ConnectionFactory.createConnection(conf);
          Admin admin = conn.getAdmin()) {
        if (!admin.isTableDisabled(tableName)) {
          throw new TableNotDisabledException(tableName);
        }
      }
      new OnlineMerger(conf, fs, tableName).process();
    }
  }

  private static abstract class Merger {
    protected final Configuration conf;
    protected final FileSystem fs;
    protected final Path rootDir;
    protected final HTableDescriptor htd;
    protected final WALFactory walFactory;
    private final long maxFilesize;

    protected Merger(Configuration conf, FileSystem fs, final TableName tableName)
    throws IOException {
      this.conf = conf;
      this.fs = fs;
      this.maxFilesize = conf.getLong(HConstants.HREGION_MAX_FILESIZE,
          HConstants.DEFAULT_MAX_FILE_SIZE);

      this.rootDir = FSUtils.getRootDir(conf);
      Path tabledir = FSUtils.getTableDir(this.rootDir, tableName);
      this.htd = FSTableDescriptors.getTableDescriptorFromFs(this.fs, tabledir);
      String logname = "merge_" + System.currentTimeMillis() + HConstants.HREGION_LOGDIR_NAME;

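      // Edits produced while merging are written through a dedicated WAL whose
      // root directory is the table directory rather than the cluster root dir.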
      final Configuration walConf = new Configuration(conf);
      FSUtils.setRootDir(walConf, tabledir);
      this.walFactory = new WALFactory(walConf, null, logname);
    }

    void process() throws IOException {
      try {
        for (HRegionInfo[] regionsToMerge = next();
            regionsToMerge != null;
            regionsToMerge = next()) {
          if (!merge(regionsToMerge)) {
            return;
          }
        }
      } finally {
        try {
          walFactory.close();
        } catch(IOException e) {
          LOG.error(e);
        }
      }
    }

    protected boolean merge(final HRegionInfo[] info) throws IOException {
      if (info.length < 2) {
        LOG.info("only one region - nothing to merge");
        return false;
      }

      HRegion currentRegion = null;
      long currentSize = 0;
      HRegion nextRegion = null;
      long nextSize = 0;
      for (int i = 0; i < info.length - 1; i++) {
        if (currentRegion == null) {
          currentRegion = HRegion.openHRegion(conf, fs, this.rootDir, info[i], this.htd,
              walFactory.getWAL(info[i].getEncodedNameAsBytes(),
                info[i].getTable().getNamespace()));
          currentSize = currentRegion.getLargestHStoreSize();
        }
        nextRegion = HRegion.openHRegion(conf, fs, this.rootDir, info[i + 1], this.htd,
            walFactory.getWAL(info[i + 1].getEncodedNameAsBytes(),
              info[i + 1].getTable().getNamespace()));
        nextSize = nextRegion.getLargestHStoreSize();

        if ((currentSize + nextSize) <= (maxFilesize / 2)) {
          // We merge two adjacent regions if their total size is no more than
          // half of the desired maximum region size
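          // For example, with the default hbase.hregion.max.filesize (10 GB in
          // this version of HBase), two regions are merged only when their
          // combined largest-store size is at most 5 GB; maxFilesize is read
          // from the configuration in the constructor above.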
          LOG.info("Merging regions " + currentRegion.getRegionInfo().getRegionNameAsString() +
            " and " + nextRegion.getRegionInfo().getRegionNameAsString());
          HRegion mergedRegion =
            HRegion.mergeAdjacent(currentRegion, nextRegion);
          updateMeta(currentRegion.getRegionInfo().getRegionName(),
            nextRegion.getRegionInfo().getRegionName(), mergedRegion);
          break;
        }
        LOG.info("not merging regions " +
          Bytes.toStringBinary(currentRegion.getRegionInfo().getRegionName()) +
            " and " + Bytes.toStringBinary(nextRegion.getRegionInfo().getRegionName()));
        currentRegion.close();
        currentRegion = nextRegion;
        currentSize = nextSize;
      }
      if(currentRegion != null) {
        currentRegion.close();
      }
      return true;
    }

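    /**
     * @return the next regions to consider merging, normally a pair of adjacent
     * regions; fewer than two entries (or null) means there is nothing left to merge
     * @throws IOException if the next candidate regions cannot be read
     */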
    protected abstract HRegionInfo[] next() throws IOException;

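    /**
     * Replaces the catalog entries for the two merged-away regions with an
     * entry for the new, merged region.
     *
     * @param oldRegion1 region name of the first region that was merged
     * @param oldRegion2 region name of the second region that was merged
     * @param newRegion the region produced by the merge
     * @throws IOException if the catalog cannot be updated
     */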
    protected abstract void updateMeta(final byte [] oldRegion1,
      final byte [] oldRegion2, HRegion newRegion)
    throws IOException;

  }

  /** Instantiated to compact a normal user table */
  private static class OnlineMerger extends Merger {
    private final TableName tableName;
    private final Table table;
    private final ResultScanner metaScanner;
    private HRegionInfo latestRegion;

    OnlineMerger(Configuration conf, FileSystem fs,
      final TableName tableName)
    throws IOException {
      super(conf, fs, tableName);
      this.tableName = tableName;
      Connection connection = ConnectionFactory.createConnection(conf);
      this.table = connection.getTable(TableName.META_TABLE_NAME);
      this.metaScanner = table.getScanner(HConstants.CATALOG_FAMILY,
          HConstants.REGIONINFO_QUALIFIER);
      this.latestRegion = null;
    }

    private HRegionInfo nextRegion() throws IOException {
      try {
        Result results = getMetaRow();
        if (results == null) {
          return null;
        }
        HRegionInfo region = MetaTableAccessor.getHRegionInfo(results);
        if (region == null) {
          throw new NoSuchElementException("meta region entry missing " +
              Bytes.toString(HConstants.CATALOG_FAMILY) + ":" +
              Bytes.toString(HConstants.REGIONINFO_QUALIFIER));
        }
        if (!region.getTable().equals(this.tableName)) {
          return null;
        }
        return region;
      } catch (IOException e) {
        e = e instanceof RemoteException ? ((RemoteException) e).unwrapRemoteException() : e;
        LOG.error("meta scanner error", e);
        metaScanner.close();
        throw e;
      }
    }

    /*
     * Check current row has a HRegionInfo.  Skip to next row if HRI is empty.
     * @return the Result for the current row, or null if we are off the end
     * @throws IOException
     */
    private Result getMetaRow() throws IOException {
      Result currentRow = metaScanner.next();
      boolean foundResult = false;
      while (currentRow != null) {
        LOG.info("Row: <" + Bytes.toStringBinary(currentRow.getRow()) + ">");
        byte[] regionInfoValue = currentRow.getValue(HConstants.CATALOG_FAMILY,
            HConstants.REGIONINFO_QUALIFIER);
        if (regionInfoValue == null || regionInfoValue.length == 0) {
          currentRow = metaScanner.next();
          continue;
        }
        HRegionInfo region = MetaTableAccessor.getHRegionInfo(currentRow);
        if (!region.getTable().equals(this.tableName)) {
          currentRow = metaScanner.next();
          continue;
        }
        foundResult = true;
        break;
      }
      return foundResult ? currentRow : null;
    }

    @Override
    protected HRegionInfo[] next() throws IOException {
      List<HRegionInfo> regions = new ArrayList<HRegionInfo>();
      if(latestRegion == null) {
        latestRegion = nextRegion();
      }
      if(latestRegion != null) {
        regions.add(latestRegion);
      }
      latestRegion = nextRegion();
      if(latestRegion != null) {
        regions.add(latestRegion);
      }
      return regions.toArray(new HRegionInfo[regions.size()]);
    }

    @Override
    protected void updateMeta(final byte [] oldRegion1,
        final byte [] oldRegion2,
      HRegion newRegion)
    throws IOException {
      byte[][] regionsToDelete = {oldRegion1, oldRegion2};
      for (int r = 0; r < regionsToDelete.length; r++) {
        if(Bytes.equals(regionsToDelete[r], latestRegion.getRegionName())) {
          latestRegion = null;
        }
        Delete delete = new Delete(regionsToDelete[r]);
        table.delete(delete);
        if(LOG.isDebugEnabled()) {
          LOG.debug("updated columns in row: " + Bytes.toStringBinary(regionsToDelete[r]));
        }
      }
      newRegion.getRegionInfo().setOffline(true);

      MetaTableAccessor.addRegionToMeta(table, newRegion.getRegionInfo());

      if(LOG.isDebugEnabled()) {
        LOG.debug("updated columns in row: "
            + Bytes.toStringBinary(newRegion.getRegionInfo().getRegionName()));
      }
    }
  }
}