/**
 *
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.regionserver.compactions;

import static org.apache.hadoop.hbase.regionserver.Store.NO_PRIORITY;

import java.util.Collection;
import java.util.Collections;
import java.util.stream.Collectors;

import org.apache.hadoop.hbase.regionserver.HStoreFile;
import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
import org.apache.hadoop.util.StringUtils.TraditionalBinaryPrefix;
import org.apache.yetus.audience.InterfaceAudience;

import org.apache.hbase.thirdparty.com.google.common.base.Preconditions;

/**
 * This class holds all logical details necessary to run a compaction.
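 * <p>
 * Illustrative usage when building a request (a sketch only; the local variable names below are
 * placeholders, not identifiers from the surrounding code):
 * <pre>
 *   CompactionRequestImpl request = new CompactionRequestImpl(selectedFiles);
 *   request.setDescription(regionName, storeName);
 *   request.setIsMajor(isMajorCompaction, isAllFiles);
 *   request.setPriority(priority);
 * </pre>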
 */
@InterfaceAudience.Private
public class CompactionRequestImpl implements CompactionRequest {

  // was this compaction promoted to an off-peak compaction?
  private boolean isOffPeak = false;
  private enum DisplayCompactionType { MINOR, ALL_FILES, MAJOR }
  private DisplayCompactionType isMajor = DisplayCompactionType.MINOR;
  private int priority = NO_PRIORITY;
  private Collection<HStoreFile> filesToCompact;

  // CompactionRequest object creation time.
  private long selectionTime;
  private String regionName = "";
  private String storeName = "";
  private long totalSize = -1L;
  private CompactionLifeCycleTracker tracker = CompactionLifeCycleTracker.DUMMY;

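  /**
   * Creates a request to compact the given collection of store files; the selection time is
   * recorded at construction via {@link EnvironmentEdgeManager#currentTime()}.
   */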
  public CompactionRequestImpl(Collection<HStoreFile> files) {
    this.selectionTime = EnvironmentEdgeManager.currentTime();
    this.filesToCompact = Preconditions.checkNotNull(files, "files for compaction cannot be null");
    recalculateSize();
  }

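  /**
   * Replaces the set of files to compact and recomputes the total compaction size.
   */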
  public void updateFiles(Collection<HStoreFile> files) {
    this.filesToCompact = Preconditions.checkNotNull(files, "files for compaction cannot be null");
    recalculateSize();
  }

  @Override
  public Collection<HStoreFile> getFiles() {
    return Collections.unmodifiableCollection(this.filesToCompact);
  }

  /**
   * Sets the region and store names, for logging.
   */
  public void setDescription(String regionName, String storeName) {
    this.regionName = regionName;
    this.storeName = storeName;
  }

  /** Gets the total size of all StoreFiles in the compaction */
  @Override
  public long getSize() {
    return totalSize;
  }

  @Override
  public boolean isAllFiles() {
    return this.isMajor == DisplayCompactionType.MAJOR
        || this.isMajor == DisplayCompactionType.ALL_FILES;
  }

  @Override
  public boolean isMajor() {
    return this.isMajor == DisplayCompactionType.MAJOR;
  }

  /** Gets the priority for the request */
  @Override
  public int getPriority() {
    return priority;
  }

  /** Sets the priority for the request */
  public void setPriority(int p) {
    this.priority = p;
  }

  @Override
  public boolean isOffPeak() {
    return this.isOffPeak;
  }

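  /** Marks or unmarks this request as promoted to an off-peak compaction. */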
  public void setOffPeak(boolean value) {
    this.isOffPeak = value;
  }

  @Override
  public long getSelectionTime() {
    return this.selectionTime;
  }

  /**
   * Specify if this compaction should be a major compaction based on the state of the store.
   * @param isMajor <tt>true</tt> if the system determines that this compaction should be a major
   *          compaction
   * @param isAllFiles <tt>true</tt> if all the store files are selected for this compaction
   */
  public void setIsMajor(boolean isMajor, boolean isAllFiles) {
    assert isAllFiles || !isMajor;
    this.isMajor = !isAllFiles ? DisplayCompactionType.MINOR
        : (isMajor ? DisplayCompactionType.MAJOR : DisplayCompactionType.ALL_FILES);
  }

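  /** Sets the tracker that follows the life cycle of this compaction. */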
  public void setTracker(CompactionLifeCycleTracker tracker) {
    this.tracker = tracker;
  }

  public CompactionLifeCycleTracker getTracker() {
    return tracker;
  }

  @Override
  public int hashCode() {
    final int prime = 31;
    int result = 1;
    result = prime * result + ((filesToCompact == null) ? 0 : filesToCompact.hashCode());
    result = prime * result + ((isMajor == null) ? 0 : isMajor.hashCode());
    result = prime * result + (isOffPeak ? 1231 : 1237);
    result = prime * result + priority;
    result = prime * result + ((regionName == null) ? 0 : regionName.hashCode());
    result = prime * result + (int) (selectionTime ^ (selectionTime >>> 32));
    result = prime * result + ((storeName == null) ? 0 : storeName.hashCode());
    result = prime * result + (int) (totalSize ^ (totalSize >>> 32));
    result = prime * result + ((tracker == null) ? 0 : tracker.hashCode());
    return result;
  }

  @Override
  public boolean equals(Object obj) {
    if (this == obj) {
      return true;
    }
    if (obj == null) {
      return false;
    }
    if (getClass() != obj.getClass()) {
      return false;
    }
    CompactionRequestImpl other = (CompactionRequestImpl) obj;
    if (filesToCompact == null) {
      if (other.filesToCompact != null) {
        return false;
      }
    } else if (!filesToCompact.equals(other.filesToCompact)) {
      return false;
    }
    if (isMajor != other.isMajor) {
      return false;
    }
    if (isOffPeak != other.isOffPeak) {
      return false;
    }
    if (priority != other.priority) {
      return false;
    }
    if (regionName == null) {
      if (other.regionName != null) {
        return false;
      }
    } else if (!regionName.equals(other.regionName)) {
      return false;
    }
    if (selectionTime != other.selectionTime) {
      return false;
    }
    if (storeName == null) {
      if (other.storeName != null) {
        return false;
      }
    } else if (!storeName.equals(other.storeName)) {
      return false;
    }
    if (totalSize != other.totalSize) {
      return false;
    }
    if (tracker == null) {
      if (other.tracker != null) {
        return false;
      }
    } else if (!tracker.equals(other.tracker)) {
      return false;
    }
    return true;
  }

  @Override
  public String toString() {
    String fsList = filesToCompact.stream().filter(f -> f.getReader() != null)
        .map(f -> TraditionalBinaryPrefix.long2String(f.getReader().length(), "", 1))
        .collect(Collectors.joining(", "));

    return "regionName=" + regionName + ", storeName=" + storeName + ", fileCount=" +
        this.getFiles().size() + ", fileSize=" +
        TraditionalBinaryPrefix.long2String(totalSize, "", 1) +
        ((fsList.isEmpty()) ? "" : " (" + fsList + ")") + ", priority=" + priority + ", time=" +
        selectionTime;
  }

  /**
   * Recalculate the size of the compaction based on current files.
   */
  private void recalculateSize() {
    this.totalSize = filesToCompact.stream().map(HStoreFile::getReader)
        .mapToLong(r -> r != null ? r.length() : 0L).sum();
  }
}