/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.regionserver;

import java.io.IOException;
import java.util.Arrays;
import java.util.Map;
import java.util.Optional;
import java.util.OptionalLong;
import java.util.TreeMap;

import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.CellBuilderFactory;
import org.apache.hadoop.hbase.CellBuilderType;
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.HDFSBlocksDistribution;
import org.apache.hadoop.hbase.io.hfile.CacheConfig;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.DNS;
import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
import org.apache.yetus.audience.InterfaceAudience;

/**
 * A mock used so our tests don't deal with actual StoreFiles. Tests set the length, sequence
 * id, entry count, time range and other per-file state directly instead of writing real HFiles.
 */
@InterfaceAudience.Private
public class MockHStoreFile extends HStoreFile {
  long length = 0;
  boolean isRef = false;
  long ageInDisk;
  long sequenceid;
  private Map<byte[], byte[]> metadata = new TreeMap<>(Bytes.BYTES_COMPARATOR);
  byte[] splitPoint = null;
  TimeRangeTracker timeRangeTracker;
  long entryCount;
  boolean isMajor;
  HDFSBlocksDistribution hdfsBlocksDistribution;
  long modificationTime;
  boolean compactedAway;

  MockHStoreFile(HBaseTestingUtility testUtil, Path testPath,
      long length, long ageInDisk, boolean isRef, long sequenceid) throws IOException {
    super(testUtil.getTestFileSystem(), testPath, testUtil.getConfiguration(),
        new CacheConfig(testUtil.getConfiguration()), BloomType.NONE, true);
    this.length = length;
    this.isRef = isRef;
    this.ageInDisk = ageInDisk;
    this.sequenceid = sequenceid;
    this.isMajor = false;
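    // Report a single block's weight on the local host so HDFS locality computations
    // see this mock file as fully local.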
    hdfsBlocksDistribution = new HDFSBlocksDistribution();
    hdfsBlocksDistribution.addHostsAndBlockWeight(new String[]
      { DNS.getHostname(testUtil.getConfiguration(), DNS.ServerType.REGIONSERVER) }, 1);
    modificationTime = EnvironmentEdgeManager.currentTime();
  }

  void setLength(long newLen) {
    this.length = newLen;
  }

  @Override
  public long getMaxSequenceId() {
    return sequenceid;
  }

  @Override
  public boolean isMajorCompactionResult() {
    return isMajor;
  }

  public void setIsMajor(boolean isMajor) {
    this.isMajor = isMajor;
  }

  @Override
  public boolean isReference() {
    return this.isRef;
  }

  @Override
  public boolean isBulkLoadResult() {
    return false;
  }

  @Override
  public byte[] getMetadataValue(byte[] key) {
    return this.metadata.get(key);
  }

  public void setMetadataValue(byte[] key, byte[] value) {
    this.metadata.put(key, value);
  }

  void setTimeRangeTracker(TimeRangeTracker timeRangeTracker) {
    this.timeRangeTracker = timeRangeTracker;
  }

  void setEntries(long entryCount) {
    this.entryCount = entryCount;
  }

  @Override
  public OptionalLong getMinimumTimestamp() {
    return timeRangeTracker == null ? OptionalLong.empty()
        : OptionalLong.of(timeRangeTracker.getMin());
  }

  @Override
  public OptionalLong getMaximumTimestamp() {
    return timeRangeTracker == null ? OptionalLong.empty()
        : OptionalLong.of(timeRangeTracker.getMax());
  }

  @Override
  public void markCompactedAway() {
    this.compactedAway = true;
  }

  @Override
  public boolean isCompactedAway() {
    return compactedAway;
  }

  // Older mixed-case spelling; delegates to getModificationTimestamp().
  @Override
  public long getModificationTimeStamp() {
    return getModificationTimestamp();
  }

  @Override
  public long getModificationTimestamp() {
    return modificationTime;
  }

  @Override
  public HDFSBlocksDistribution getHDFSBlockDistribution() {
    return hdfsBlocksDistribution;
  }

  @Override
  public void initReader() throws IOException {
    // no-op: the mock has no real file to open
  }

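  // Both scanner factories delegate to the stub reader below; pread scans pass pread=true,
  // stream scans pass pread=false.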
  @Override
  public StoreFileScanner getPreadScanner(boolean cacheBlocks, long readPt, long scannerOrder,
      boolean canOptimizeForNonNullColumn) {
    return getReader().getStoreFileScanner(cacheBlocks, true, false, readPt, scannerOrder,
      canOptimizeForNonNullColumn);
  }

  @Override
  public StoreFileScanner getStreamScanner(boolean canUseDropBehind, boolean cacheBlocks,
      boolean isCompaction, long readPt, long scannerOrder, boolean canOptimizeForNonNullColumn)
      throws IOException {
    return getReader().getStoreFileScanner(cacheBlocks, false, isCompaction, readPt, scannerOrder,
      canOptimizeForNonNullColumn);
  }

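  // Return a lightweight stub reader that serves the mock's captured length, entry count,
  // time range and split point instead of reading an actual HFile.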
  @Override
  public StoreFileReader getReader() {
    final long len = this.length;
    final TimeRangeTracker timeRangeTracker = this.timeRangeTracker;
    final long entries = this.entryCount;
    return new StoreFileReader() {
      @Override
      public long length() {
        return len;
      }

      @Override
      public long getMaxTimestamp() {
        return timeRangeTracker == null ? Long.MAX_VALUE : timeRangeTracker.getMax();
      }

      @Override
      public long getEntries() {
        return entries;
      }

      @Override
      public void close(boolean evictOnClose) throws IOException {
        // no-op
      }

      @Override
      public Optional<Cell> getLastKey() {
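        // The last key must sort after the split point, so append a trailing zero byte.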
        if (splitPoint != null) {
          return Optional.of(CellBuilderFactory.create(CellBuilderType.DEEP_COPY)
              .setType(Cell.Type.Put)
              .setRow(Arrays.copyOf(splitPoint, splitPoint.length + 1)).build());
        } else {
          return Optional.empty();
        }
      }

      @Override
      public Optional<Cell> midKey() throws IOException {
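        // The mid key is the split point itself.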
        if (splitPoint != null) {
          return Optional.of(CellBuilderFactory.create(CellBuilderType.DEEP_COPY)
              .setType(Cell.Type.Put).setRow(splitPoint).build());
        } else {
          return Optional.empty();
        }
      }

      @Override
      public Optional<Cell> getFirstKey() {
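        // The first key must sort before the split point, so drop its last byte.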
        if (splitPoint != null) {
          return Optional.of(CellBuilderFactory.create(CellBuilderType.DEEP_COPY)
              .setType(Cell.Type.Put).setRow(splitPoint, 0, splitPoint.length - 1)
              .build());
        } else {
          return Optional.empty();
        }
      }
    };
  }

  @Override
  public OptionalLong getBulkLoadTimestamp() {
    // We always return false for isBulkLoadResult, so there is no bulk load timestamp.
    return OptionalLong.empty();
  }
}