/**
 *
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.master;

import java.io.FileNotFoundException;
import java.io.IOException;
import java.util.Comparator;
import java.util.HashSet;
import java.util.Map;
import java.util.TreeMap;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicInteger;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.MetaTableAccessor;
import org.apache.hadoop.hbase.ScheduledChore;
import org.apache.hadoop.hbase.Server;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.backup.HFileArchiver;
import org.apache.hadoop.hbase.classification.InterfaceAudience;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.regionserver.HRegionFileSystem;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.FSUtils;
import org.apache.hadoop.hbase.util.Pair;
import org.apache.hadoop.hbase.util.PairOfSameType;
import org.apache.hadoop.hbase.util.Triple;

/**
 * A janitor for the catalog tables.  Periodically scans the <code>hbase:meta</code>
 * catalog table looking for unused regions to garbage collect.
 */
@InterfaceAudience.Private
public class CatalogJanitor extends ScheduledChore {
  private static final Log LOG = LogFactory.getLog(CatalogJanitor.class.getName());
  private final Server server;
  private final MasterServices services;
  private AtomicBoolean enabled = new AtomicBoolean(true);
  private AtomicBoolean alreadyRunning = new AtomicBoolean(false);
  private final Connection connection;

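  /**
   * Creates the janitor chore. The scan period defaults to 300000 ms (five
   * minutes) and can be tuned with the <code>hbase.catalogjanitor.interval</code>
   * configuration property read in the constructor below.
   */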
  CatalogJanitor(final Server server, final MasterServices services) {
    super("CatalogJanitor-" + server.getServerName().toShortString(), server, server
        .getConfiguration().getInt("hbase.catalogjanitor.interval", 300000));
    this.server = server;
    this.services = services;
    this.connection = server.getConnection();
  }

  @Override
  protected boolean initialChore() {
    try {
      if (this.enabled.get()) scan();
    } catch (IOException e) {
      LOG.warn("Failed initial scan of catalog table", e);
      return false;
    }
    return true;
  }

  /**
   * Enables or disables the janitor.
   * @param enabled the new enabled state
   * @return the previous value of the enabled flag
   */
  public boolean setEnabled(final boolean enabled) {
    return this.enabled.getAndSet(enabled);
  }

  boolean getEnabled() {
    return this.enabled.get();
  }

  @Override
  protected void chore() {
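    // Only scan when assignment state has settled: master failover cleanup must
    // be complete and there must be no regions in transition, so the janitor does
    // not collect regions whose assignment is still being worked out.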
    try {
      AssignmentManager am = this.services.getAssignmentManager();
      if (this.enabled.get()
          && am != null
          && am.isFailoverCleanupDone()
          && am.getRegionStates().getRegionsInTransition().size() == 0) {
        scan();
      } else {
        LOG.warn("CatalogJanitor is disabled or the master is not yet ready "
            + "(failover cleanup pending or regions in transition); not running scan.");
      }
    } catch (IOException e) {
      LOG.warn("Failed scan of catalog table", e);
    }
  }

  /**
   * Scans hbase:meta and returns the number of scanned rows, a map of merged
   * regions, and an ordered map of split parents.
   * @return triple of the number of scanned rows, a map of merged regions, and
   *         a map of split parent regioninfos
   * @throws IOException
   */
  Triple<Integer, Map<HRegionInfo, Result>, Map<HRegionInfo, Result>>
    getMergedRegionsAndSplitParents() throws IOException {
    return getMergedRegionsAndSplitParents(null);
  }

  /**
   * Scans hbase:meta and returns the number of scanned rows, a map of merged
   * regions, and an ordered map of split parents. If the given table name is
   * null, merged regions and split parents of all tables are returned; otherwise
   * only those of the specified table.
   * @param tableName null represents all tables
   * @return triple of the number of scanned rows, a map of merged regions, and
   *         a map of split parent regioninfos
   * @throws IOException
   */
  Triple<Integer, Map<HRegionInfo, Result>, Map<HRegionInfo, Result>>
    getMergedRegionsAndSplitParents(final TableName tableName) throws IOException {
    final boolean isTableSpecified = (tableName != null);
    // TODO: Only works with single hbase:meta region currently.  Fix.
    final AtomicInteger count = new AtomicInteger(0);
    // Keep a map of found split parents.  These are candidates for cleanup.
    // Use a comparator that sorts split parents before their daughters.
    final Map<HRegionInfo, Result> splitParents =
      new TreeMap<HRegionInfo, Result>(new SplitParentFirstComparator());
    final Map<HRegionInfo, Result> mergedRegions = new TreeMap<HRegionInfo, Result>();
    // This visitor collects split parents and merged regions, and counts rows in hbase:meta

    MetaTableAccessor.Visitor visitor = new MetaTableAccessor.Visitor() {
      @Override
      public boolean visit(Result r) throws IOException {
        if (r == null || r.isEmpty()) return true;
        count.incrementAndGet();
        HRegionInfo info = MetaTableAccessor.getHRegionInfo(r);
        if (info == null) return true; // Keep scanning
        if (isTableSpecified
            && info.getTable().compareTo(tableName) > 0) {
          // Another table, stop scanning
          return false;
        }
        if (info.isSplitParent()) splitParents.put(info, r);
        if (r.getValue(HConstants.CATALOG_FAMILY, HConstants.MERGEA_QUALIFIER) != null) {
          mergedRegions.put(info, r);
        }
        // Returning true means "keep scanning"
        return true;
      }
    };

    // Run a full scan of the hbase:meta catalog table passing in our custom visitor;
    // when a table is specified, the scan is limited to that table's regions
    MetaTableAccessor.scanMetaForTableRegions(this.connection, visitor, tableName);

    return new Triple<Integer, Map<HRegionInfo, Result>, Map<HRegionInfo, Result>>(
        count.get(), mergedRegions, splitParents);
  }

  /**
   * If the merged region no longer holds references to the regions it was merged
   * from, archive those regions on HDFS and delete the merge references from
   * hbase:meta.
   * @param mergedRegion the region produced by the merge
   * @param regionA one of the regions that was merged away
   * @param regionB the other region that was merged away
   * @return true if we deleted the references in the merged region from hbase:meta
   *         and archived the files on the file system
   * @throws IOException
   */
  boolean cleanMergeRegion(final HRegionInfo mergedRegion,
      final HRegionInfo regionA, final HRegionInfo regionB) throws IOException {
    FileSystem fs = this.services.getMasterFileSystem().getFileSystem();
    Path rootdir = this.services.getMasterFileSystem().getRootDir();
    Path tabledir = FSUtils.getTableDir(rootdir, mergedRegion.getTable());
    HTableDescriptor htd = getTableDescriptor(mergedRegion.getTable());
    HRegionFileSystem regionFs = null;
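    // Open the merged region's layout on the filesystem. If the directory is
    // already gone, regionFs stays null and the merged region is treated as
    // holding no references.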
    try {
      regionFs = HRegionFileSystem.openRegionFromFileSystem(
          this.services.getConfiguration(), fs, tabledir, mergedRegion, true);
    } catch (IOException e) {
      LOG.warn("Merged region does not exist: " + mergedRegion.getEncodedName());
    }
    if (regionFs == null || !regionFs.hasReferences(htd)) {
      LOG.debug("Deleting region " + regionA.getRegionNameAsString() + " and "
          + regionB.getRegionNameAsString()
          + " from fs because merged region no longer holds references");
      HFileArchiver.archiveRegion(this.services.getConfiguration(), fs, regionA);
      HFileArchiver.archiveRegion(this.services.getConfiguration(), fs, regionB);
      MetaTableAccessor.deleteMergeQualifiers(server.getConnection(),
        mergedRegion);
      return true;
    }
    return false;
  }

  /**
   * Run janitorial scan of catalog <code>hbase:meta</code> table looking for
   * garbage to collect.
   * @return number of cleaned regions
   * @throws IOException
   */
  int scan() throws IOException {
    try {
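      // Allow only one scan at a time; if a previous scan is still running,
      // skip this invocation rather than queue up behind it.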
      if (!alreadyRunning.compareAndSet(false, true)) {
        LOG.debug("CatalogJanitor already running");
        return 0;
      }
      Triple<Integer, Map<HRegionInfo, Result>, Map<HRegionInfo, Result>> scanTriple =
        getMergedRegionsAndSplitParents();
      int count = scanTriple.getFirst();
      // Clean merge regions first.
      int mergeCleaned = 0;
      Map<HRegionInfo, Result> mergedRegions = scanTriple.getSecond();
      for (Map.Entry<HRegionInfo, Result> e : mergedRegions.entrySet()) {
        PairOfSameType<HRegionInfo> p = MetaTableAccessor.getMergeRegions(e.getValue());
        HRegionInfo regionA = p.getFirst();
        HRegionInfo regionB = p.getSecond();
        if (regionA == null || regionB == null) {
          LOG.warn("Unexpected references regionA="
              + (regionA == null ? "null" : regionA.getRegionNameAsString())
              + ",regionB="
              + (regionB == null ? "null" : regionB.getRegionNameAsString())
              + " in merged region " + e.getKey().getRegionNameAsString());
        } else {
          if (cleanMergeRegion(e.getKey(), regionA, regionB)) {
            mergeCleaned++;
          }
        }
      }
      // Now clean the split parents.
      Map<HRegionInfo, Result> splitParents = scanTriple.getThird();

      // Work on our list of found parents. See which, if any, we can clean up.
      int splitCleaned = 0;
      // regions whose parents are still around
      HashSet<String> parentNotCleaned = new HashSet<String>();
      for (Map.Entry<HRegionInfo, Result> e : splitParents.entrySet()) {
        if (!parentNotCleaned.contains(e.getKey().getEncodedName()) &&
            cleanParent(e.getKey(), e.getValue())) {
          splitCleaned++;
        } else {
          // We could not clean the parent, so its daughters should not be
          // cleaned either (HBASE-6160)
          PairOfSameType<HRegionInfo> daughters =
              MetaTableAccessor.getDaughterRegions(e.getValue());
          parentNotCleaned.add(daughters.getFirst().getEncodedName());
          parentNotCleaned.add(daughters.getSecond().getEncodedName());
        }
      }
      if ((mergeCleaned + splitCleaned) != 0) {
        LOG.info("Scanned " + count + " catalog row(s), gc'd " + mergeCleaned
            + " unreferenced merged region(s) and " + splitCleaned
            + " unreferenced parent region(s)");
      } else if (LOG.isTraceEnabled()) {
        LOG.trace("Scanned " + count + " catalog row(s), gc'd " + mergeCleaned
            + " unreferenced merged region(s) and " + splitCleaned
            + " unreferenced parent region(s)");
      }
      return mergeCleaned + splitCleaned;
    } finally {
      alreadyRunning.set(false);
    }
  }

  /**
   * Compare HRegionInfos in a way that has split parents sort BEFORE their
   * daughters.
   */
  static class SplitParentFirstComparator implements Comparator<HRegionInfo> {
    Comparator<byte[]> rowEndKeyComparator = new Bytes.RowEndKeyComparator();
    @Override
    public int compare(HRegionInfo left, HRegionInfo right) {
      // This comparator differs from the one in HRegionInfo in that it sorts
      // parents before their daughters.
      if (left == null) return -1;
      if (right == null) return 1;
      // Compare table names.
      int result = left.getTable().compareTo(right.getTable());
      if (result != 0) return result;
      // Compare start keys.
      result = Bytes.compareTo(left.getStartKey(), right.getStartKey());
      if (result != 0) return result;
      // Compare end keys, but flip the operands so the parent comes first.
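      // The parent covers the key range of both daughters, so among regions with
      // the same start key it has the larger (or empty) end key; flipping the
      // operands therefore sorts the parent ahead of its first daughter.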
      result = rowEndKeyComparator.compare(right.getEndKey(), left.getEndKey());

      return result;
    }
  }

  /**
   * If the daughters no longer hold references to the parent, delete the parent.
   * @param parent HRegionInfo of the split, offlined parent
   * @param rowContent Content of <code>parent</code> row in
   * <code>metaRegionName</code>
   * @return True if we removed <code>parent</code> from the meta table and from
   * the filesystem.
   * @throws IOException
   */
  boolean cleanParent(final HRegionInfo parent, Result rowContent)
  throws IOException {
    boolean result = false;
    // Check whether it is a merged region whose merge references have not yet
    // been cleaned. Not necessary to check MERGEB_QUALIFIER because the two
    // qualifiers are inserted/deleted together.
    if (rowContent.getValue(HConstants.CATALOG_FAMILY,
        HConstants.MERGEA_QUALIFIER) != null) {
      // Wait for the merge regions to be cleaned up first
      return result;
    }
    // Run checks on each daughter split.
    PairOfSameType<HRegionInfo> daughters = MetaTableAccessor.getDaughterRegions(rowContent);
    Pair<Boolean, Boolean> a = checkDaughterInFs(parent, daughters.getFirst());
    Pair<Boolean, Boolean> b = checkDaughterInFs(parent, daughters.getSecond());
    if (hasNoReferences(a) && hasNoReferences(b)) {
      LOG.debug("Deleting region " + parent.getRegionNameAsString() +
        " because daughter splits no longer hold references");
      FileSystem fs = this.services.getMasterFileSystem().getFileSystem();
      if (LOG.isTraceEnabled()) LOG.trace("Archiving parent region: " + parent);
      HFileArchiver.archiveRegion(this.services.getConfiguration(), fs, parent);
      MetaTableAccessor.deleteRegion(this.connection, parent);
      result = true;
    }
    return result;
  }

  /**
   * @param p A pair where the first boolean says whether or not the daughter
   * region directory exists in the filesystem, and the second boolean says
   * whether the daughter has references to the parent.
   * @return True if the passed <code>p</code> signifies no references.
   */
  private boolean hasNoReferences(final Pair<Boolean, Boolean> p) {
    return !p.getFirst() || !p.getSecond();
  }

  /**
   * Checks if a daughter region -- either splitA or splitB -- still holds
   * references to its parent.
   * @param parent Parent region
   * @param daughter Daughter region
   * @return A pair where the first boolean says whether or not the daughter
   * region directory exists in the filesystem, and the second boolean says
   * whether the daughter has references to the parent.
   * @throws IOException
   */
  Pair<Boolean, Boolean> checkDaughterInFs(final HRegionInfo parent, final HRegionInfo daughter)
  throws IOException {
    if (daughter == null) {
      return new Pair<Boolean, Boolean>(Boolean.FALSE, Boolean.FALSE);
    }

    FileSystem fs = this.services.getMasterFileSystem().getFileSystem();
    Path rootdir = this.services.getMasterFileSystem().getRootDir();
    Path tabledir = FSUtils.getTableDir(rootdir, daughter.getTable());

    Path daughterRegionDir = new Path(tabledir, daughter.getEncodedName());

    HRegionFileSystem regionFs = null;

    try {
      if (!FSUtils.isExists(fs, daughterRegionDir)) {
        return new Pair<Boolean, Boolean>(Boolean.FALSE, Boolean.FALSE);
      }
    } catch (IOException ioe) {
      LOG.warn("Error trying to determine if daughter region exists, " +
               "assuming exists and has references", ioe);
      return new Pair<Boolean, Boolean>(Boolean.TRUE, Boolean.TRUE);
    }

    try {
      regionFs = HRegionFileSystem.openRegionFromFileSystem(
          this.services.getConfiguration(), fs, tabledir, daughter, true);
    } catch (IOException e) {
      LOG.warn("Error trying to determine referenced files from: " + daughter.getEncodedName()
          + " to: " + parent.getEncodedName() + "; assuming it has references", e);
      return new Pair<Boolean, Boolean>(Boolean.TRUE, Boolean.TRUE);
    }

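    // The daughter directory exists; report whether any of its column families
    // still hold reference files back to the parent region.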
    boolean references = false;
    HTableDescriptor parentDescriptor = getTableDescriptor(parent.getTable());
    for (HColumnDescriptor family: parentDescriptor.getFamilies()) {
      if ((references = regionFs.hasReferences(family.getNameAsString()))) {
        break;
      }
    }
    return new Pair<Boolean, Boolean>(Boolean.TRUE, Boolean.valueOf(references));
  }

  private HTableDescriptor getTableDescriptor(final TableName tableName)
      throws FileNotFoundException, IOException {
    return this.services.getTableDescriptors().get(tableName);
  }

  /**
   * Checks if the specified region has merge qualifiers and, if so, tries to
   * clean them.
   * @param region the region to check
   * @return true if the specified region no longer has merge qualifiers
   * @throws IOException
   */
  public boolean cleanMergeQualifier(final HRegionInfo region)
      throws IOException {
    // Get the merge regions if this is a merged region that still has merge
    // qualifiers
    Pair<HRegionInfo, HRegionInfo> mergeRegions = MetaTableAccessor
        .getRegionsFromMergeQualifier(this.services.getConnection(),
          region.getRegionName());
    if (mergeRegions == null
        || (mergeRegions.getFirst() == null && mergeRegions.getSecond() == null)) {
      // It doesn't have merge qualifiers; no need to clean
      return true;
    }
    // This shouldn't happen; the two qualifiers are inserted/deleted together
    if (mergeRegions.getFirst() == null || mergeRegions.getSecond() == null) {
      LOG.error("Merged region " + region.getRegionNameAsString()
          + " has only one merge qualifier in META.");
      return false;
    }
    return cleanMergeRegion(region, mergeRegions.getFirst(),
        mergeRegions.getSecond());
  }
}