001/*
002 * Licensed to the Apache Software Foundation (ASF) under one
003 * or more contributor license agreements.  See the NOTICE file
004 * distributed with this work for additional information
005 * regarding copyright ownership.  The ASF licenses this file
006 * to you under the Apache License, Version 2.0 (the
007 * "License"); you may not use this file except in compliance
008 * with the License.  You may obtain a copy of the License at
009 *
010 *     http://www.apache.org/licenses/LICENSE-2.0
011 *
012 * Unless required by applicable law or agreed to in writing, software
013 * distributed under the License is distributed on an "AS IS" BASIS,
014 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
015 * See the License for the specific language governing permissions and
016 * limitations under the License.
017 */
018package org.apache.hadoop.hbase.regionserver;
019
020import static org.apache.hadoop.hbase.regionserver.Store.NO_PRIORITY;
021import static org.apache.hadoop.hbase.regionserver.Store.PRIORITY_USER;
022
023import java.io.IOException;
024import java.io.PrintWriter;
025import java.io.StringWriter;
026import java.util.Comparator;
027import java.util.Iterator;
028import java.util.Optional;
029import java.util.Set;
030import java.util.concurrent.BlockingQueue;
031import java.util.concurrent.ConcurrentHashMap;
032import java.util.concurrent.Executors;
033import java.util.concurrent.RejectedExecutionException;
034import java.util.concurrent.RejectedExecutionHandler;
035import java.util.concurrent.ThreadPoolExecutor;
036import java.util.concurrent.TimeUnit;
037import java.util.concurrent.atomic.AtomicInteger;
038import java.util.function.IntSupplier;
039import org.apache.hadoop.conf.Configuration;
040import org.apache.hadoop.hbase.conf.ConfigurationManager;
041import org.apache.hadoop.hbase.conf.PropagatingConfigurationObserver;
042import org.apache.hadoop.hbase.quotas.RegionServerSpaceQuotaManager;
043import org.apache.hadoop.hbase.regionserver.compactions.CompactionContext;
044import org.apache.hadoop.hbase.regionserver.compactions.CompactionLifeCycleTracker;
045import org.apache.hadoop.hbase.regionserver.compactions.CompactionRequestImpl;
046import org.apache.hadoop.hbase.regionserver.compactions.CompactionRequester;
047import org.apache.hadoop.hbase.regionserver.throttle.CompactionThroughputControllerFactory;
048import org.apache.hadoop.hbase.regionserver.throttle.ThroughputController;
049import org.apache.hadoop.hbase.security.Superusers;
050import org.apache.hadoop.hbase.security.User;
051import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
052import org.apache.hadoop.hbase.util.StealJobQueue;
053import org.apache.hadoop.ipc.RemoteException;
054import org.apache.hadoop.util.StringUtils;
055import org.apache.yetus.audience.InterfaceAudience;
056import org.slf4j.Logger;
057import org.slf4j.LoggerFactory;
058
059import org.apache.hbase.thirdparty.com.google.common.base.Preconditions;
060import org.apache.hbase.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder;
061
/**
 * Compact region on request and then run split if appropriate.
 * <p>
 * Owns the thread pools used by a region server for compactions (a long/large pool and a
 * short/small pool) and for splits, and routes compaction/split requests onto them. The
 * compaction pools are torn down and re-created when compactions are switched off/on, hence
 * the volatile executor fields.
 */
@InterfaceAudience.Private
public class CompactSplit implements CompactionRequester, PropagatingConfigurationObserver {
  private static final Logger LOG = LoggerFactory.getLogger(CompactSplit.class);

  // Configuration key for the large compaction threads.
  public final static String LARGE_COMPACTION_THREADS =
    "hbase.regionserver.thread.compaction.large";
  public final static int LARGE_COMPACTION_THREADS_DEFAULT = 1;

  // Configuration key for the small compaction threads.
  public final static String SMALL_COMPACTION_THREADS =
    "hbase.regionserver.thread.compaction.small";
  public final static int SMALL_COMPACTION_THREADS_DEFAULT = 1;

  // Configuration key for split threads
  public final static String SPLIT_THREADS = "hbase.regionserver.thread.split";
  public final static int SPLIT_THREADS_DEFAULT = 1;

  // Soft cap on online regions beyond which no further splits are requested; see
  // shouldSplitRegion().
  public static final String REGION_SERVER_REGION_SPLIT_LIMIT =
    "hbase.regionserver.regionSplitLimit";
  public static final int DEFAULT_REGION_SERVER_REGION_SPLIT_LIMIT = 1000;
  // Boolean flag toggled by switchCompaction() to enable/disable compactions on this server.
  public static final String HBASE_REGION_SERVER_ENABLE_COMPACTION =
    "hbase.regionserver.compaction.enabled";

  // null when built via the test-only constructor; otherwise the owning region server.
  private final HRegionServer server;
  private final Configuration conf;
  // volatile: the compaction pools are re-created by reInitializeCompactionsExecutors() when
  // compactions are switched back on, so readers must always see the latest instance.
  private volatile ThreadPoolExecutor longCompactions;
  private volatile ThreadPoolExecutor shortCompactions;
  private volatile ThreadPoolExecutor splits;

  private volatile ThroughputController compactionThroughputController;
  // Keys of stores ("<encodedRegionName>:<family>", see getStoreNameForUnderCompaction) that
  // currently have a compaction queued or running.
  private volatile Set<String> underCompactionStores = ConcurrentHashMap.newKeySet();

  private volatile boolean compactionsEnabled;
  /**
   * Splitting should not take place if the total number of regions exceed this. This is not a hard
   * limit to the number of regions but it is a guideline to stop splitting after number of online
   * regions is greater than this.
   */
  private int regionSplitLimit;
105
  /**
   * Production constructor: builds the compaction/split pools and the throughput controller
   * from the server's configuration.
   */
  CompactSplit(HRegionServer server) {
    this.server = server;
    this.conf = server.getConfiguration();
    this.compactionsEnabled = this.conf.getBoolean(HBASE_REGION_SERVER_ENABLE_COMPACTION, true);
    createCompactionExecutors();
    createSplitExcecutors();

    // compaction throughput controller
    this.compactionThroughputController =
      CompactionThroughputControllerFactory.create(server, conf);
  }

  // only for test
  // NOTE(review): server is null here, so methods that dereference it (e.g.
  // requestCompactionInternal) must not be invoked on instances built this way.
  public CompactSplit(Configuration conf) {
    this.server = null;
    this.conf = conf;
    this.compactionsEnabled = this.conf.getBoolean(HBASE_REGION_SERVER_ENABLE_COMPACTION, true);
    createCompactionExecutors();
    createSplitExcecutors();
  }

  // Creates the fixed-size split thread pool with daemon threads named after the creating
  // thread. (Method name keeps its historical misspelling; it is private to this class.)
  private void createSplitExcecutors() {
    final String n = Thread.currentThread().getName();
    int splitThreads = conf.getInt(SPLIT_THREADS, SPLIT_THREADS_DEFAULT);
    this.splits = (ThreadPoolExecutor) Executors.newFixedThreadPool(splitThreads,
      new ThreadFactoryBuilder().setNameFormat(n + "-splits-%d").setDaemon(true).build());
  }
133
  // Creates the long and short compaction pools. They are coupled through a StealJobQueue:
  // the long pool drains the main queue and the short pool drains its steal-from view, which
  // allows idle long-compaction threads to steal jobs from the short compaction queue (see
  // the shutdownLongCompactions() javadoc).
  private void createCompactionExecutors() {
    this.regionSplitLimit =
      conf.getInt(REGION_SERVER_REGION_SPLIT_LIMIT, DEFAULT_REGION_SERVER_REGION_SPLIT_LIMIT);

    int largeThreads =
      Math.max(1, conf.getInt(LARGE_COMPACTION_THREADS, LARGE_COMPACTION_THREADS_DEFAULT));
    int smallThreads = conf.getInt(SMALL_COMPACTION_THREADS, SMALL_COMPACTION_THREADS_DEFAULT);

    // if we have throttle threads, make sure the user also specified size
    Preconditions.checkArgument(largeThreads > 0 && smallThreads > 0);

    final String n = Thread.currentThread().getName();

    // Jobs in the queue are ordered by COMPARATOR (priority, then selection time).
    StealJobQueue<Runnable> stealJobQueue = new StealJobQueue<Runnable>(COMPARATOR);
    this.longCompactions = new ThreadPoolExecutor(largeThreads, largeThreads, 60, TimeUnit.SECONDS,
      stealJobQueue,
      new ThreadFactoryBuilder().setNameFormat(n + "-longCompactions-%d").setDaemon(true).build());
    this.longCompactions.setRejectedExecutionHandler(new Rejection());
    // Prestart so long-compaction threads are already waiting on the steal queue.
    this.longCompactions.prestartAllCoreThreads();
    this.shortCompactions = new ThreadPoolExecutor(smallThreads, smallThreads, 60, TimeUnit.SECONDS,
      stealJobQueue.getStealFromQueue(),
      new ThreadFactoryBuilder().setNameFormat(n + "-shortCompactions-%d").setDaemon(true).build());
    this.shortCompactions.setRejectedExecutionHandler(new Rejection());
  }
158
159  @Override
160  public String toString() {
161    return "compactionQueue=(longCompactions=" + longCompactions.getQueue().size()
162      + ":shortCompactions=" + shortCompactions.getQueue().size() + ")" + ", splitQueue="
163      + splits.getQueue().size();
164  }
165
166  public String dumpQueue() {
167    StringBuilder queueLists = new StringBuilder();
168    queueLists.append("Compaction/Split Queue dump:\n");
169    queueLists.append("  LargeCompation Queue:\n");
170    BlockingQueue<Runnable> lq = longCompactions.getQueue();
171    Iterator<Runnable> it = lq.iterator();
172    while (it.hasNext()) {
173      queueLists.append("    " + it.next().toString());
174      queueLists.append("\n");
175    }
176
177    if (shortCompactions != null) {
178      queueLists.append("\n");
179      queueLists.append("  SmallCompation Queue:\n");
180      lq = shortCompactions.getQueue();
181      it = lq.iterator();
182      while (it.hasNext()) {
183        queueLists.append("    " + it.next().toString());
184        queueLists.append("\n");
185      }
186    }
187
188    queueLists.append("\n");
189    queueLists.append("  Split Queue:\n");
190    lq = splits.getQueue();
191    it = lq.iterator();
192    while (it.hasNext()) {
193      queueLists.append("    " + it.next().toString());
194      queueLists.append("\n");
195    }
196
197    return queueLists.toString();
198  }
199
  /**
   * Requests a split of {@code r} if the region reports a split point and this server is below
   * the region-count guideline.
   * @return true if a split request was queued, false otherwise
   */
  public synchronized boolean requestSplit(final Region r) {
    // Don't split regions that are blocking is the default behavior.
    // But in some circumstances, split here is needed to prevent the region size from
    // continuously growing, as well as the number of store files, see HBASE-26242.
    HRegion hr = (HRegion) r;
    try {
      if (shouldSplitRegion() && hr.getCompactPriority() >= PRIORITY_USER) {
        byte[] midKey = hr.checkSplit().orElse(null);
        if (midKey != null) {
          requestSplit(r, midKey);
          return true;
        }
      }
    } catch (IndexOutOfBoundsException e) {
      // We get this sometimes. Not sure why. Catch and return false; no split request.
      // NOTE(review): hr cannot be null here (it was dereferenced above before anything in the
      // try could throw), so the null guards below are purely defensive.
      LOG.warn("Catching out-of-bounds; region={}, policy={}",
        hr == null ? null : hr.getRegionInfo(), hr == null ? "null" : hr.getCompactPriority(), e);
    }
    return false;
  }

  // Convenience overload: no user identity is passed to the split thread (see the three-arg
  // overload's comment about the User parameter).
  private synchronized void requestSplit(final Region r, byte[] midKey) {
    requestSplit(r, midKey, null);
  }
224
225  /*
226   * The User parameter allows the split thread to assume the correct user identity
227   */
228  private synchronized void requestSplit(final Region r, byte[] midKey, User user) {
229    if (midKey == null) {
230      LOG.debug("Region " + r.getRegionInfo().getRegionNameAsString()
231        + " not splittable because midkey=null");
232      return;
233    }
234    try {
235      this.splits.execute(new SplitRequest(r, midKey, this.server, user));
236      if (LOG.isDebugEnabled()) {
237        LOG.debug("Splitting " + r + ", " + this);
238      }
239    } catch (RejectedExecutionException ree) {
240      LOG.info("Could not execute split for " + r, ree);
241    }
242  }
243
  // Forcibly stops both compaction pools via shutdownNow(); used when compactions are
  // switched off.
  private void interrupt() {
    longCompactions.shutdownNow();
    shortCompactions.shutdownNow();
  }

  // Rebuilds the compaction pools after interrupt(); used when compactions are switched
  // back on.
  private void reInitializeCompactionsExecutors() {
    createCompactionExecutors();
  }

  // set protected for test
  protected interface CompactionCompleteTracker {

    // Invoked once per store when that store's compaction finishes or is not executed.
    default void completed(Store store) {
    }
  }

  // Shared no-op tracker used when the caller does not care about compaction life cycle.
  private static final CompactionCompleteTracker DUMMY_COMPLETE_TRACKER =
    new CompactionCompleteTracker() {
    };
263
264  private static final class AggregatingCompleteTracker implements CompactionCompleteTracker {
265
266    private final CompactionLifeCycleTracker tracker;
267
268    private final AtomicInteger remaining;
269
270    public AggregatingCompleteTracker(CompactionLifeCycleTracker tracker, int numberOfStores) {
271      this.tracker = tracker;
272      this.remaining = new AtomicInteger(numberOfStores);
273    }
274
275    @Override
276    public void completed(Store store) {
277      if (remaining.decrementAndGet() == 0) {
278        tracker.completed();
279      }
280    }
281  }
282
283  private CompactionCompleteTracker getCompleteTracker(CompactionLifeCycleTracker tracker,
284    IntSupplier numberOfStores) {
285    if (tracker == CompactionLifeCycleTracker.DUMMY) {
286      // a simple optimization to avoid creating unnecessary objects as usually we do not care about
287      // the life cycle of a compaction.
288      return DUMMY_COMPLETE_TRACKER;
289    } else {
290      return new AggregatingCompleteTracker(tracker, numberOfStores.getAsInt());
291    }
292  }
293
  /**
   * Requests a user compaction (files selected now) for every store of {@code region}.
   */
  @Override
  public synchronized void requestCompaction(HRegion region, String why, int priority,
    CompactionLifeCycleTracker tracker, User user) throws IOException {
    requestCompactionInternal(region, why, priority, true, tracker,
      getCompleteTracker(tracker, () -> region.getTableDescriptor().getColumnFamilyCount()), user);
  }

  /**
   * Requests a user compaction (files selected now) for a single store of {@code region}.
   */
  @Override
  public synchronized void requestCompaction(HRegion region, HStore store, String why, int priority,
    CompactionLifeCycleTracker tracker, User user) throws IOException {
    requestCompactionInternal(region, store, why, priority, true, tracker,
      getCompleteTracker(tracker, () -> 1), user);
  }

  /**
   * Enables or disables compactions: switching off forcibly stops the compaction pools,
   * switching on re-creates them if they were previously shut down.
   */
  @Override
  public void switchCompaction(boolean onOrOff) {
    if (onOrOff) {
      // re-create executor pool if compactions are disabled.
      if (!isCompactionsEnabled()) {
        LOG.info("Re-Initializing compactions because user switched on compactions");
        reInitializeCompactionsExecutors();
      }
    } else {
      LOG.info("Interrupting running compactions because user switched off compactions");
      interrupt();
    }
    setCompactionsEnabled(onOrOff);
  }
322
  // Fans a region-level compaction request out to every store in the region.
  private void requestCompactionInternal(HRegion region, String why, int priority,
    boolean selectNow, CompactionLifeCycleTracker tracker,
    CompactionCompleteTracker completeTracker, User user) throws IOException {
    // request compaction on all stores
    for (HStore store : region.stores.values()) {
      requestCompactionInternal(region, store, why, priority, selectNow, tracker, completeTracker,
        user);
    }
  }
332
  // set protected for test
  /**
   * Queues a compaction for {@code store}. When {@code selectNow} is true the files to compact
   * are selected here and the pool is chosen by compaction size (user compaction); otherwise
   * selection is deferred to the worker thread and the job starts in the small pool (system
   * compaction). No-op when the server is stopping or the table has compactions disabled;
   * non-superuser requests are also dropped when a space quota violation policy disallows
   * compactions for the table.
   */
  protected void requestCompactionInternal(HRegion region, HStore store, String why, int priority,
    boolean selectNow, CompactionLifeCycleTracker tracker,
    CompactionCompleteTracker completeTracker, User user) throws IOException {
    if (
      this.server.isStopped() || (region.getTableDescriptor() != null
        && !region.getTableDescriptor().isCompactionEnabled())
    ) {
      return;
    }
    RegionServerSpaceQuotaManager spaceQuotaManager =
      this.server.getRegionServerSpaceQuotaManager();

    if (
      user != null && !Superusers.isSuperUser(user) && spaceQuotaManager != null
        && spaceQuotaManager.areCompactionsDisabled(region.getTableDescriptor().getTableName())
    ) {
      // Enter here only when:
      // It's a user generated req, the user is NOT a superuser, quotas are enabled, and the
      // table's compactions are disabled by a space quota violation policy.
      String reason = "Ignoring compaction request for " + region
        + " as an active space quota violation " + " policy disallows compactions.";
      tracker.notExecuted(store, reason);
      completeTracker.completed(store);
      LOG.debug(reason);
      return;
    }

    CompactionContext compaction;
    if (selectNow) {
      Optional<CompactionContext> c =
        selectCompaction(region, store, priority, tracker, completeTracker, user);
      if (!c.isPresent()) {
        // message logged inside
        return;
      }
      compaction = c.get();
    } else {
      compaction = null;
    }

    ThreadPoolExecutor pool;
    if (selectNow) {
      // compaction.get is safe as we will just return if selectNow is true but no compaction is
      // selected
      pool = store.throttleCompaction(compaction.getRequest().getSize())
        ? longCompactions
        : shortCompactions;
    } else {
      // We assume that most compactions are small. So, put system compactions into small
      // pool; we will do selection there, and move to large pool if necessary.
      pool = shortCompactions;
    }
    pool.execute(
      new CompactionRunner(store, region, compaction, tracker, completeTracker, pool, user));
    if (LOG.isDebugEnabled()) {
      LOG.debug(
        "Add compact mark for store {}, priority={}, current under compaction "
          + "store size is {}",
        getStoreNameForUnderCompaction(store), priority, underCompactionStores.size());
    }
    // Mark the store as under compaction; cleared in CompactionRunner.run()'s finally block.
    underCompactionStores.add(getStoreNameForUnderCompaction(store));
    region.incrementCompactionsQueuedCount();
    if (LOG.isDebugEnabled()) {
      String type = (pool == shortCompactions) ? "Small " : "Large ";
      LOG.debug(type + "Compaction requested: " + (selectNow ? compaction.toString() : "system")
        + (why != null && !why.isEmpty() ? "; Because: " + why : "") + "; " + this);
    }
  }
401
  /**
   * Queues a system (internal) compaction for all stores of {@code region}; file selection is
   * deferred to the compaction thread.
   */
  public synchronized void requestSystemCompaction(HRegion region, String why) throws IOException {
    requestCompactionInternal(region, why, NO_PRIORITY, false, CompactionLifeCycleTracker.DUMMY,
      DUMMY_COMPLETE_TRACKER, null);
  }

  // Single-store system compaction; never skips, see the four-arg overload.
  public void requestSystemCompaction(HRegion region, HStore store, String why) throws IOException {
    requestSystemCompaction(region, store, why, false);
  }

  /**
   * Queues a system compaction for a single store.
   * @param giveUpIfRequestedOrCompacting if true, do nothing when the store already has a
   *          compaction queued or running
   */
  public synchronized void requestSystemCompaction(HRegion region, HStore store, String why,
    boolean giveUpIfRequestedOrCompacting) throws IOException {
    if (giveUpIfRequestedOrCompacting && isUnderCompaction(store)) {
      LOG.debug("Region {} store {} is under compaction now, skip to request compaction", region,
        store.getColumnFamilyName());
      return;
    }
    requestCompactionInternal(region, store, why, NO_PRIORITY, false,
      CompactionLifeCycleTracker.DUMMY, DUMMY_COMPLETE_TRACKER, null);
  }
421
422  private Optional<CompactionContext> selectCompaction(HRegion region, HStore store, int priority,
423    CompactionLifeCycleTracker tracker, CompactionCompleteTracker completeTracker, User user)
424    throws IOException {
425    // don't even select for compaction if disableCompactions is set to true
426    if (!isCompactionsEnabled()) {
427      LOG.info(String.format("User has disabled compactions"));
428      return Optional.empty();
429    }
430    Optional<CompactionContext> compaction = store.requestCompaction(priority, tracker, user);
431    if (!compaction.isPresent() && region.getRegionInfo() != null) {
432      String reason = "Not compacting " + region.getRegionInfo().getRegionNameAsString()
433        + " because compaction request was cancelled";
434      tracker.notExecuted(store, reason);
435      completeTracker.completed(store);
436      LOG.debug(reason);
437    }
438    return compaction;
439  }
440
  /**
   * Only interrupt once it's done with a run through the work loop.
   */
  void interruptIfNecessary() {
    splits.shutdown();
    longCompactions.shutdown();
    shortCompactions.shutdown();
  }

  // Blocks until the given pool terminates, escalating to shutdownNow() after each 60s wait
  // that times out. InterruptedException is deliberately swallowed (after forcing
  // shutdownNow) so the loop keeps waiting until the pool really terminates; re-interrupting
  // here would make the next awaitTermination throw immediately.
  private void waitFor(ThreadPoolExecutor t, String name) {
    boolean done = false;
    while (!done) {
      try {
        done = t.awaitTermination(60, TimeUnit.SECONDS);
        // NOTE(review): logged after the wait returns, so this message also appears once on
        // successful termination.
        LOG.info("Waiting for " + name + " to finish...");
        if (!done) {
          t.shutdownNow();
        }
      } catch (InterruptedException ie) {
        LOG.warn("Interrupted waiting for " + name + " to finish...");
        t.shutdownNow();
      }
    }
  }

  // Waits for all three pools (splits, long and short compactions) to terminate.
  void join() {
    waitFor(splits, "Split Thread");
    waitFor(longCompactions, "Large Compaction Thread");
    waitFor(shortCompactions, "Small Compaction Thread");
  }
471
  /**
   * Returns the current size of the queue containing regions that are processed.
   * @return The current size of the regions queue (sum of the long and short compaction
   *         queues).
   */
  public int getCompactionQueueSize() {
    return longCompactions.getQueue().size() + shortCompactions.getQueue().size();
  }

  public int getLargeCompactionQueueSize() {
    return longCompactions.getQueue().size();
  }

  public int getSmallCompactionQueueSize() {
    return shortCompactions.getQueue().size();
  }

  public int getSplitQueueSize() {
    return splits.getQueue().size();
  }

  // True while the number of online regions is below the (soft) split limit; also warns when
  // the count exceeds 90% of the limit.
  private boolean shouldSplitRegion() {
    if (server.getNumberOfOnlineRegions() > 0.9 * regionSplitLimit) {
      LOG.warn("Total number of regions is approaching the upper limit " + regionSplitLimit + ". "
        + "Please consider taking a look at http://hbase.apache.org/book.html#ops.regionmgt");
    }
    return (regionSplitLimit > server.getNumberOfOnlineRegions());
  }

  /**
   * @return the regionSplitLimit
   */
  public int getRegionSplitLimit() {
    return this.regionSplitLimit;
  }

  /**
   * Check if this store is under compaction (queued or running).
   */
  public boolean isUnderCompaction(final HStore s) {
    return underCompactionStores.contains(getStoreNameForUnderCompaction(s));
  }
513
514  private static final Comparator<Runnable> COMPARATOR = new Comparator<Runnable>() {
515
516    private int compare(CompactionRequestImpl r1, CompactionRequestImpl r2) {
517      if (r1 == r2) {
518        return 0; // they are the same request
519      }
520      // less first
521      int cmp = Integer.compare(r1.getPriority(), r2.getPriority());
522      if (cmp != 0) {
523        return cmp;
524      }
525      cmp = Long.compare(r1.getSelectionTime(), r2.getSelectionTime());
526      if (cmp != 0) {
527        return cmp;
528      }
529
530      // break the tie based on hash code
531      return System.identityHashCode(r1) - System.identityHashCode(r2);
532    }
533
534    @Override
535    public int compare(Runnable r1, Runnable r2) {
536      // CompactionRunner first
537      if (r1 instanceof CompactionRunner) {
538        if (!(r2 instanceof CompactionRunner)) {
539          return -1;
540        }
541      } else {
542        if (r2 instanceof CompactionRunner) {
543          return 1;
544        } else {
545          // break the tie based on hash code
546          return System.identityHashCode(r1) - System.identityHashCode(r2);
547        }
548      }
549      CompactionRunner o1 = (CompactionRunner) r1;
550      CompactionRunner o2 = (CompactionRunner) r2;
551      // less first
552      int cmp = Integer.compare(o1.queuedPriority, o2.queuedPriority);
553      if (cmp != 0) {
554        return cmp;
555      }
556      CompactionContext c1 = o1.compaction;
557      CompactionContext c2 = o2.compaction;
558      if (c1 != null) {
559        return c2 != null ? compare(c1.getRequest(), c2.getRequest()) : -1;
560      } else {
561        return c2 != null ? 1 : 0;
562      }
563    }
564  };
565
  /**
   * Runnable that performs a single compaction (selecting files first for system compactions,
   * possibly moving itself to the long pool) and then queues follow-up work: a split check
   * and/or a recursive system compaction while the store remains blocked.
   */
  private final class CompactionRunner implements Runnable {
    private final HStore store;
    private final HRegion region;
    // null for system compactions: files are selected in doCompaction() on the worker thread.
    private final CompactionContext compaction;
    private final CompactionLifeCycleTracker tracker;
    private final CompactionCompleteTracker completeTracker;
    // Priority used for queue ordering (see COMPARATOR); refreshed before selection.
    private int queuedPriority;
    // Pool this runner was submitted to; may be switched to the long pool after selection.
    private ThreadPoolExecutor parent;
    private User user;
    // Submission timestamp, only used in toString().
    private long time;

    public CompactionRunner(HStore store, HRegion region, CompactionContext compaction,
      CompactionLifeCycleTracker tracker, CompactionCompleteTracker completeTracker,
      ThreadPoolExecutor parent, User user) {
      this.store = store;
      this.region = region;
      this.compaction = compaction;
      this.tracker = tracker;
      this.completeTracker = completeTracker;
      this.queuedPriority =
        compaction != null ? compaction.getRequest().getPriority() : store.getCompactPriority();
      this.parent = parent;
      this.user = user;
      this.time = EnvironmentEdgeManager.currentTime();
    }

    @Override
    public String toString() {
      if (compaction != null) {
        return "Request=" + compaction.getRequest();
      } else {
        return "region=" + region.toString() + ", storeName=" + store.toString() + ", priority="
          + queuedPriority + ", startTime=" + time;
      }
    }

    // Selects files if needed, runs the compaction, and schedules follow-up split/compaction
    // requests. Exceptions are reported but never propagated out of the runner.
    private void doCompaction(User user) {
      CompactionContext c;
      // Common case - system compaction without a file selection. Select now.
      if (compaction == null) {
        int oldPriority = this.queuedPriority;
        this.queuedPriority = this.store.getCompactPriority();
        if (this.queuedPriority > oldPriority) {
          // Store priority decreased while we were in queue (due to some other compaction?),
          // requeue with new priority to avoid blocking potential higher priorities.
          this.parent.execute(this);
          return;
        }
        Optional<CompactionContext> selected;
        try {
          selected = selectCompaction(this.region, this.store, queuedPriority, tracker,
            completeTracker, user);
        } catch (IOException ex) {
          LOG.error("Compaction selection failed " + this, ex);
          server.checkFileSystem();
          region.decrementCompactionsQueuedCount();
          return;
        }
        if (!selected.isPresent()) {
          region.decrementCompactionsQueuedCount();
          return; // nothing to do
        }
        c = selected.get();
        assert c.hasSelection();
        // Now see if we are in correct pool for the size; if not, go to the correct one.
        // We might end up waiting for a while, so cancel the selection.

        ThreadPoolExecutor pool =
          store.throttleCompaction(c.getRequest().getSize()) ? longCompactions : shortCompactions;

        // Long compaction pool can process small job
        // Short compaction pool should not process large job
        if (this.parent == shortCompactions && pool == longCompactions) {
          this.store.cancelRequestedCompaction(c);
          this.parent = pool;
          this.parent.execute(this);
          return;
        }
      } else {
        c = compaction;
      }
      // Finally we can compact something.
      assert c != null;

      tracker.beforeExecution(store);
      try {
        // Note: please don't put single-compaction logic here;
        // put it into region/store/etc. This is CST logic.
        long start = EnvironmentEdgeManager.currentTime();
        boolean completed = region.compact(c, store, compactionThroughputController, user);
        long now = EnvironmentEdgeManager.currentTime();
        LOG.info(((completed) ? "Completed" : "Aborted") + " compaction " + this + "; duration="
          + StringUtils.formatTimeDiff(now, start));
        if (completed) {
          // degenerate case: blocked regions require recursive enqueues
          if (
            region.getCompactPriority() < Store.PRIORITY_USER && store.getCompactPriority() <= 0
          ) {
            requestSystemCompaction(region, store, "Recursive enqueue");
          } else {
            // see if the compaction has caused us to exceed max region size
            if (!requestSplit(region) && store.getCompactPriority() <= 0) {
              requestSystemCompaction(region, store, "Recursive enqueue");
            }
          }
        }
      } catch (IOException ex) {
        // Unwrap RemoteExceptions so the server-side cause is logged; keep the original
        // callstack too when the unwrap changed the exception.
        IOException remoteEx =
          ex instanceof RemoteException ? ((RemoteException) ex).unwrapRemoteException() : ex;
        LOG.error("Compaction failed " + this, remoteEx);
        if (remoteEx != ex) {
          LOG.info("Compaction failed at original callstack: " + formatStackTrace(ex));
        }
        region.reportCompactionRequestFailure();
        server.checkFileSystem();
      } catch (Exception ex) {
        LOG.error("Compaction failed " + this, ex);
        region.reportCompactionRequestFailure();
        server.checkFileSystem();
      } finally {
        tracker.afterExecution(store);
        completeTracker.completed(store);
        region.decrementCompactionsQueuedCount();
        LOG.debug("Status {}", CompactSplit.this);
      }
    }

    @Override
    public void run() {
      try {
        Preconditions.checkNotNull(server);
        if (
          server.isStopped() || (region.getTableDescriptor() != null
            && !region.getTableDescriptor().isCompactionEnabled())
        ) {
          region.decrementCompactionsQueuedCount();
          return;
        }
        doCompaction(user);
      } finally {
        // Always clear the under-compaction mark so later requestSystemCompaction calls with
        // giveUpIfRequestedOrCompacting=true are not suppressed forever.
        if (LOG.isDebugEnabled()) {
          LOG.debug("Remove under compaction mark for store: {}",
            store.getHRegion().getRegionInfo().getEncodedName() + ":"
              + store.getColumnFamilyName());
        }
        underCompactionStores.remove(getStoreNameForUnderCompaction(store));
      }
    }

    // Renders the full stack trace of ex into a String (used to report the original callstack
    // of an unwrapped RemoteException).
    private String formatStackTrace(Exception ex) {
      StringWriter sw = new StringWriter();
      PrintWriter pw = new PrintWriter(sw);
      ex.printStackTrace(pw);
      pw.flush();
      return sw.toString();
    }
  }
723
724  /**
725   * Cleanup class to use when rejecting a compaction request from the queue.
726   */
727  private static class Rejection implements RejectedExecutionHandler {
728    @Override
729    public void rejectedExecution(Runnable runnable, ThreadPoolExecutor pool) {
730      if (runnable instanceof CompactionRunner) {
731        CompactionRunner runner = (CompactionRunner) runnable;
732        LOG.debug("Compaction Rejected: " + runner);
733        if (runner.compaction != null) {
734          runner.store.cancelRequestedCompaction(runner.compaction);
735        }
736      }
737    }
738  }
739
740  /**
741   * {@inheritDoc}
742   */
743  @Override
744  public void onConfigurationChange(Configuration newConf) {
745    // Check if number of large / small compaction threads has changed, and then
746    // adjust the core pool size of the thread pools, by using the
747    // setCorePoolSize() method. According to the javadocs, it is safe to
748    // change the core pool size on-the-fly. We need to reset the maximum
749    // pool size, as well.
750    int largeThreads =
751      Math.max(1, newConf.getInt(LARGE_COMPACTION_THREADS, LARGE_COMPACTION_THREADS_DEFAULT));
752    if (this.longCompactions.getCorePoolSize() != largeThreads) {
753      LOG.info("Changing the value of " + LARGE_COMPACTION_THREADS + " from "
754        + this.longCompactions.getCorePoolSize() + " to " + largeThreads);
755      if (this.longCompactions.getCorePoolSize() < largeThreads) {
756        this.longCompactions.setMaximumPoolSize(largeThreads);
757        this.longCompactions.setCorePoolSize(largeThreads);
758      } else {
759        this.longCompactions.setCorePoolSize(largeThreads);
760        this.longCompactions.setMaximumPoolSize(largeThreads);
761      }
762    }
763
764    int smallThreads = newConf.getInt(SMALL_COMPACTION_THREADS, SMALL_COMPACTION_THREADS_DEFAULT);
765    if (this.shortCompactions.getCorePoolSize() != smallThreads) {
766      LOG.info("Changing the value of " + SMALL_COMPACTION_THREADS + " from "
767        + this.shortCompactions.getCorePoolSize() + " to " + smallThreads);
768      if (this.shortCompactions.getCorePoolSize() < smallThreads) {
769        this.shortCompactions.setMaximumPoolSize(smallThreads);
770        this.shortCompactions.setCorePoolSize(smallThreads);
771      } else {
772        this.shortCompactions.setCorePoolSize(smallThreads);
773        this.shortCompactions.setMaximumPoolSize(smallThreads);
774      }
775    }
776
777    int splitThreads = newConf.getInt(SPLIT_THREADS, SPLIT_THREADS_DEFAULT);
778    if (this.splits.getCorePoolSize() != splitThreads) {
779      LOG.info("Changing the value of " + SPLIT_THREADS + " from " + this.splits.getCorePoolSize()
780        + " to " + splitThreads);
781      if (this.splits.getCorePoolSize() < splitThreads) {
782        this.splits.setMaximumPoolSize(splitThreads);
783        this.splits.setCorePoolSize(splitThreads);
784      } else {
785        this.splits.setCorePoolSize(splitThreads);
786        this.splits.setMaximumPoolSize(splitThreads);
787      }
788    }
789
790    ThroughputController old = this.compactionThroughputController;
791    if (old != null) {
792      old.stop("configuration change");
793    }
794    this.compactionThroughputController =
795      CompactionThroughputControllerFactory.create(server, newConf);
796
797    // We change this atomically here instead of reloading the config in order that upstream
798    // would be the only one with the flexibility to reload the config.
799    this.conf.reloadConfiguration();
800  }
801
  // Current core pool size of the short compaction pool.
  protected int getSmallCompactionThreadNum() {
    return this.shortCompactions.getCorePoolSize();
  }

  // Current core pool size of the long compaction pool.
  protected int getLargeCompactionThreadNum() {
    return this.longCompactions.getCorePoolSize();
  }

  // Current core pool size of the split pool.
  protected int getSplitThreadNum() {
    return this.splits.getCorePoolSize();
  }

  /**
   * {@inheritDoc}
   */
  @Override
  public void registerChildren(ConfigurationManager manager) {
    // No children to register.
  }

  /**
   * {@inheritDoc}
   */
  @Override
  public void deregisterChildren(ConfigurationManager manager) {
    // No children to register
  }

  public ThroughputController getCompactionThroughputController() {
    return compactionThroughputController;
  }

  /**
   * Shutdown the long compaction thread pool. Should only be used in unit test to prevent long
   * compaction thread pool from stealing job from short compaction queue
   */
  void shutdownLongCompactions() {
    this.longCompactions.shutdown();
  }

  public void clearLongCompactionsQueue() {
    longCompactions.getQueue().clear();
  }

  public void clearShortCompactionsQueue() {
    shortCompactions.getQueue().clear();
  }

  public boolean isCompactionsEnabled() {
    return compactionsEnabled;
  }

  public void setCompactionsEnabled(boolean compactionsEnabled) {
    this.compactionsEnabled = compactionsEnabled;
    // Keep the live Configuration in sync so later readers of the flag see the new value.
    this.conf.set(HBASE_REGION_SERVER_ENABLE_COMPACTION, String.valueOf(compactionsEnabled));
  }

  /**
   * @return the longCompactions thread pool executor
   */
  ThreadPoolExecutor getLongCompactions() {
    return longCompactions;
  }

  /**
   * @return the shortCompactions thread pool executor
   */
  ThreadPoolExecutor getShortCompactions() {
    return shortCompactions;
  }

  // Key used in underCompactionStores: "<encodedRegionName>:<family>"; the region part is
  // empty when the store has no region (e.g. in tests).
  private String getStoreNameForUnderCompaction(HStore store) {
    return String.format("%s:%s",
      store.getHRegion() != null ? store.getHRegion().getRegionInfo().getEncodedName() : "",
      store.getColumnFamilyName());
  }
878
879}