/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.regionserver;

import java.io.IOException;
import java.util.Arrays;
import java.util.Map;
import java.util.Optional;
import java.util.OptionalLong;
import java.util.TreeMap;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.CellBuilderFactory;
import org.apache.hadoop.hbase.CellBuilderType;
import org.apache.hadoop.hbase.HBaseTestingUtil;
import org.apache.hadoop.hbase.HDFSBlocksDistribution;
import org.apache.hadoop.hbase.io.hfile.CacheConfig;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.DNS;
import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
import org.apache.yetus.audience.InterfaceAudience;

/**
 * A mock used so our tests don't deal with actual StoreFiles.
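 * <p>
 * Illustrative usage sketch (the enclosing test is assumed to supply {@code testUtil} and
 * {@code testPath}; the numeric arguments are arbitrary):
 *
 * <pre>{@code
 * // length, ageInDisk, isRef, sequenceid
 * MockHStoreFile sf = new MockHStoreFile(testUtil, testPath, 1024L, 0L, false, 42L);
 * sf.setEntries(100);
 * assertEquals(42L, sf.getMaxSequenceId());
 * }</pre>
 */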
@InterfaceAudience.Private
public class MockHStoreFile extends HStoreFile {
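  // Mutable test knobs; tests set these directly or through the setters below.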
  long length = 0;
  boolean isRef = false;
  long ageInDisk;
  long sequenceid;
  private Map<byte[], byte[]> metadata = new TreeMap<>(Bytes.BYTES_COMPARATOR);
  byte[] splitPoint = null;
  TimeRangeTracker timeRangeTracker;
  long entryCount;
  boolean isMajor;
  HDFSBlocksDistribution hdfsBlocksDistribution;
  long modificationTime;
  boolean compactedAway;

  MockHStoreFile(HBaseTestingUtil testUtil, Path testPath, long length, long ageInDisk,
    boolean isRef, long sequenceid) throws IOException {
    super(testUtil.getTestFileSystem(), testPath, testUtil.getConfiguration(),
      new CacheConfig(testUtil.getConfiguration()), BloomType.NONE, true);
    this.length = length;
    this.isRef = isRef;
    this.ageInDisk = ageInDisk;
    this.sequenceid = sequenceid;
    this.isMajor = false;
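    // Report the file's single block as local to this host so locality-based tests behave
    // deterministically.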
    hdfsBlocksDistribution = new HDFSBlocksDistribution();
    hdfsBlocksDistribution.addHostsAndBlockWeight(
      new String[] { DNS.getHostname(testUtil.getConfiguration(), DNS.ServerType.REGIONSERVER) },
      1);
    modificationTime = EnvironmentEdgeManager.currentTime();
  }

  void setLength(long newLen) {
    this.length = newLen;
  }

  @Override
  public long getMaxSequenceId() {
    return sequenceid;
  }

  @Override
  public boolean isMajorCompactionResult() {
    return isMajor;
  }

  public void setIsMajor(boolean isMajor) {
    this.isMajor = isMajor;
  }

  @Override
  public boolean isReference() {
    return this.isRef;
  }

  @Override
  public boolean isBulkLoadResult() {
    return false;
  }

  @Override
  public byte[] getMetadataValue(byte[] key) {
    return this.metadata.get(key);
  }

  public void setMetadataValue(byte[] key, byte[] value) {
    this.metadata.put(key, value);
  }

  void setTimeRangeTracker(TimeRangeTracker timeRangeTracker) {
    this.timeRangeTracker = timeRangeTracker;
  }

  void setEntries(long entryCount) {
    this.entryCount = entryCount;
  }

  @Override
  public OptionalLong getMinimumTimestamp() {
    return timeRangeTracker == null
      ? OptionalLong.empty()
      : OptionalLong.of(timeRangeTracker.getMin());
  }

  @Override
  public OptionalLong getMaximumTimestamp() {
    return timeRangeTracker == null
      ? OptionalLong.empty()
      : OptionalLong.of(timeRangeTracker.getMax());
  }

  @Override
  public void markCompactedAway() {
    this.compactedAway = true;
  }

  @Override
  public boolean isCompactedAway() {
    return compactedAway;
  }

  @Override
  public long getModificationTimestamp() {
    return modificationTime;
  }

  @Override
  public HDFSBlocksDistribution getHDFSBlockDistribution() {
    return hdfsBlocksDistribution;
  }

  @Override
  public void initReader() throws IOException {
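    // No-op: the mock hands out a fresh stub reader from getReader() instead of opening an HFile.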
  }

  @Override
  public StoreFileScanner getPreadScanner(boolean cacheBlocks, long readPt, long scannerOrder,
    boolean canOptimizeForNonNullColumn) {
    // Positional read (pread = true), never a compaction scanner.
    return getReader().getStoreFileScanner(cacheBlocks, true, false, readPt, scannerOrder,
      canOptimizeForNonNullColumn);
  }

  @Override
  public StoreFileScanner getStreamScanner(boolean canUseDropBehind, boolean cacheBlocks,
    boolean isCompaction, long readPt, long scannerOrder, boolean canOptimizeForNonNullColumn)
    throws IOException {
    // Streaming read (pread = false).
    return getReader().getStoreFileScanner(cacheBlocks, false, isCompaction, readPt, scannerOrder,
      canOptimizeForNonNullColumn);
  }

  @Override
  public StoreFileReader getReader() {
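    // Snapshot length, time range and entry count so the stub reader below stays stable even if
    // the mock is mutated afterwards.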
    final long len = this.length;
    final TimeRangeTracker timeRangeTracker = this.timeRangeTracker;
    final long entries = this.entryCount;
    return new StoreFileReader() {
      @Override
      public long length() {
        return len;
      }

      @Override
      public long getMaxTimestamp() {
        return timeRangeTracker == null ? Long.MAX_VALUE : timeRangeTracker.getMax();
      }

      @Override
      public long getEntries() {
        return entries;
      }

      @Override
      public void close(boolean evictOnClose) throws IOException {
        // no-op
      }

      @Override
      public Optional<Cell> getLastKey() {
        if (splitPoint != null) {
          // Pad with one trailing zero byte so the last key sorts strictly after the split point.
          return Optional
            .of(CellBuilderFactory.create(CellBuilderType.DEEP_COPY).setType(Cell.Type.Put)
              .setRow(Arrays.copyOf(splitPoint, splitPoint.length + 1)).build());
        } else {
          return Optional.empty();
        }
      }

      @Override
      public Optional<Cell> midKey() throws IOException {
        if (splitPoint != null) {
          return Optional.of(CellBuilderFactory.create(CellBuilderType.DEEP_COPY)
            .setType(Cell.Type.Put).setRow(splitPoint).build());
        } else {
          return Optional.empty();
        }
      }

      @Override
      public Optional<Cell> getFirstKey() {
        if (splitPoint != null) {
          // Drop the final byte so the first key sorts strictly before the split point.
          return Optional.of(CellBuilderFactory.create(CellBuilderType.DEEP_COPY)
            .setType(Cell.Type.Put).setRow(splitPoint, 0, splitPoint.length - 1).build());
        } else {
          return Optional.empty();
        }
      }
    };
  }

  @Override
  public OptionalLong getBulkLoadTimestamp() {
    // isBulkLoadResult() always returns false, so there is never a bulk load timestamp.
    return OptionalLong.empty();
  }
}