/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.regionserver;

import java.util.List;

import org.apache.hadoop.hbase.ScheduledChore;
import org.apache.hadoop.hbase.Server;
import org.apache.hadoop.hbase.Stoppable;
import org.apache.hadoop.hbase.executor.EventType;
import org.apache.yetus.audience.InterfaceAudience;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting;

/**
 * A chore that periodically archives compacted HFiles once there are no active readers
 * using those compacted files, and also helps in clearing these compacted files' entries
 * from the block cache.
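 * <p>A minimal scheduling sketch, not taken verbatim from the region server startup code; the
 * names {@code choreService}, {@code stopper} and {@code regionServerServices}, as well as the
 * two-minute period, are assumptions for illustration:
 * <pre>
 *   // hypothetical wiring: period in milliseconds, stopper and services supplied by the host
 *   CompactedHFilesDischarger discharger =
 *       new CompactedHFilesDischarger(120000, stopper, regionServerServices);
 *   choreService.scheduleChore(discharger);
 * </pre>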
 */
@InterfaceAudience.Private
public class CompactedHFilesDischarger extends ScheduledChore {
  private static final Logger LOG = LoggerFactory.getLogger(CompactedHFilesDischarger.class);
  private RegionServerServices regionServerServices;
  // Default is to use executor
  @VisibleForTesting
  private boolean useExecutor = true;

  /**
   * @param period the period of time to sleep between each run
   * @param stopper the stopper
   * @param regionServerServices the region server that starts this chore
   */
  public CompactedHFilesDischarger(final int period, final Stoppable stopper,
      final RegionServerServices regionServerServices) {
    // Need to add the config classes
    super("CompactedHFilesCleaner", stopper, period);
    this.regionServerServices = regionServerServices;
  }

  /**
   * @param period the period of time to sleep between each run
   * @param stopper the stopper
   * @param regionServerServices the region server that starts this chore
   * @param useExecutor true to submit the cleanup to the region server's executor service, false
   *          to run it synchronously in the chore thread
   */
  @VisibleForTesting
  public CompactedHFilesDischarger(final int period, final Stoppable stopper,
      final RegionServerServices regionServerServices, boolean useExecutor) {
    // Need to add the config classes
    this(period, stopper, regionServerServices);
    this.useExecutor = useExecutor;
  }

  /**
   * CompactedHFilesDischarger runs asynchronously by default using the hosting RegionServer's
   * executor service. In tests it can be useful to force a synchronous cleanup; use this method
   * to disable the executor before triggering a run.
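   * <p>A possible test-only usage sketch ({@code discharger} is assumed to be an instance of
   * this chore created by the test, in the same package since this method is package-private):
   * <pre>
   *   boolean previous = discharger.setUseExecutor(false); // archive synchronously
   *   discharger.chore();
   *   discharger.setUseExecutor(previous);
   * </pre>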
   * @return The old setting for <code>useExecutor</code>
   */
  @VisibleForTesting
  boolean setUseExecutor(final boolean useExecutor) {
    boolean oldSetting = this.useExecutor;
    this.useExecutor = useExecutor;
    return oldSetting;
  }

  @Override
  public void chore() {
    // Noop if regionServerServices is null; this only happens in tests that do not spin up
    // a cluster.
    if (regionServerServices == null) return;
    List<HRegion> onlineRegions = (List<HRegion>) regionServerServices.getRegions();
    if (onlineRegions == null) return;
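    // Walk every store of every online region and archive its compacted files.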
    for (HRegion region : onlineRegions) {
      if (LOG.isTraceEnabled()) {
        LOG.trace("Started compacted hfiles cleaner on " + region.getRegionInfo());
      }
      for (HStore store : region.getStores()) {
        try {
          if (useExecutor && regionServerServices != null) {
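            // Hand the store off to the region server's executor service so the archiving
            // runs asynchronously, off the chore thread.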
            CompactedHFilesDischargeHandler handler = new CompactedHFilesDischargeHandler(
                (Server) regionServerServices, EventType.RS_COMPACTED_FILES_DISCHARGER, store);
            regionServerServices.getExecutorService().submit(handler);
          } else {
            // Archive synchronously, either because the executor was disabled (tests) or the
            // RegionServerServices are not available.
            store.closeAndArchiveCompactedFiles();
          }
          if (LOG.isTraceEnabled()) {
            LOG.trace("Completed archiving the compacted files for the region "
                + region.getRegionInfo() + " under the store " + store.getColumnFamilyName());
          }
        } catch (Exception e) {
          LOG.error("Exception while trying to close and archive the compacted store files of "
              + "the store " + store.getColumnFamilyName() + " in the region "
              + region.getRegionInfo(), e);
        }
      }
      if (LOG.isTraceEnabled()) {
        LOG.trace(
            "Completed the compacted hfiles cleaner for the region " + region.getRegionInfo());
      }
    }
  }
}