/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.regionserver.compactions;

import java.io.IOException;
import java.util.Collection;
import java.util.List;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.classification.InterfaceAudience;
import org.apache.hadoop.hbase.regionserver.InternalScanner;
import org.apache.hadoop.hbase.regionserver.Store;
import org.apache.hadoop.hbase.regionserver.StoreFile;
import org.apache.hadoop.hbase.regionserver.StoreFileWriter;
import org.apache.hadoop.hbase.regionserver.throttle.NoLimitThroughputController;
import org.apache.hadoop.hbase.regionserver.throttle.ThroughputController;
import org.apache.hadoop.hbase.security.User;

import com.google.common.collect.Lists;

/**
 * Compact the passed set of files. Create an instance and then call
 * {@link #compact(CompactionRequest, ThroughputController, User)}.
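 * <p>
 * A minimal usage sketch (illustrative only; in the real write path the
 * {@link CompactionRequest} comes from the store's compaction policy rather than
 * being built by hand):
 * <pre>{@code
 * DefaultCompactor compactor = new DefaultCompactor(conf, store);
 * CompactionRequest request = new CompactionRequest(store.getStorefiles());
 * List<Path> newFiles =
 *     compactor.compact(request, NoLimitThroughputController.INSTANCE, null);
 * }</pre>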
 */
@InterfaceAudience.Private
public class DefaultCompactor extends Compactor<StoreFileWriter> {
  private static final Log LOG = LogFactory.getLog(DefaultCompactor.class);

  public DefaultCompactor(final Configuration conf, final Store store) {
    super(conf, store);
  }

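  /**
   * Writer factory handed to the shared {@link Compactor} pipeline. The default compactor
   * produces a single output file, opened in the store's temporary directory via
   * {@code createTmpWriter}.
   */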
  private final CellSinkFactory<StoreFileWriter> writerFactory =
      new CellSinkFactory<StoreFileWriter>() {
        @Override
        public StoreFileWriter createWriter(InternalScanner scanner, FileDetails fd,
            boolean shouldDropBehind) throws IOException {
          return createTmpWriter(fd, shouldDropBehind);
        }
      };

  /**
   * Do a minor/major compaction on an explicit set of storefiles from a Store.
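   * @return paths to the files produced by the compaction
   * @throws IOException if the compaction fails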
   */
  public List<Path> compact(final CompactionRequest request,
      ThroughputController throughputController, User user) throws IOException {
    return compact(request, defaultScannerFactory, writerFactory, throughputController, user);
  }

  /**
   * Compact a list of files for testing. Creates a fake {@link CompactionRequest} to pass to
   * {@link #compact(CompactionRequest, ThroughputController, User)}.
   * @param filesToCompact the files to compact. These are used as the compactionSelection for the
   *          generated {@link CompactionRequest}.
   * @param isMajor true to major compact (prune all deletes, max versions, etc)
   * @return Product of the compaction, or an empty list if all cells were expired or deleted and
   *         nothing made it through the compaction.
   * @throws IOException if the compaction fails
   */
  public List<Path> compactForTesting(final Collection<StoreFile> filesToCompact, boolean isMajor)
      throws IOException {
    CompactionRequest cr = new CompactionRequest(filesToCompact);
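    // A major request also covers all of the store's files, so both flags are set from isMajor.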
    cr.setIsMajor(isMajor, isMajor);
    return compact(cr, NoLimitThroughputController.INSTANCE, null);
  }

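  /**
   * Seal the single output file: record the highest sequence id of the compacted files, and
   * whether this compaction covered all of the store's files, in the file metadata, then close
   * the writer.
   */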
  @Override
  protected List<Path> commitWriter(StoreFileWriter writer, FileDetails fd,
      CompactionRequest request) throws IOException {
    List<Path> newFiles = Lists.newArrayList(writer.getPath());
    writer.appendMetadata(fd.maxSeqId, request.isAllFiles());
    writer.close();
    return newFiles;
  }

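  /**
   * Best-effort cleanup after a failed compaction: close the half-written file and delete it,
   * logging (rather than rethrowing) any failure so the original error can propagate.
   */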
  @Override
  protected void abortWriter(StoreFileWriter writer) throws IOException {
    Path leftoverFile = writer.getPath();
    try {
      writer.close();
    } catch (IOException e) {
      LOG.warn("Failed to close the writer after an unfinished compaction.", e);
    }
    try {
      store.getFileSystem().delete(leftoverFile, false);
    } catch (IOException e) {
      LOG.warn(
        "Failed to delete the leftover file " + leftoverFile + " after an unfinished compaction.",
        e);
    }
  }
}