/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.io.hfile.bucket;

import java.io.IOException;
import org.apache.hadoop.hbase.io.hfile.Cacheable;
import org.apache.hadoop.hbase.nio.ByteBuff;
import org.apache.yetus.audience.InterfaceAudience;

/**
 * IO engine that stores data in a file on the local block device and accesses it through a
 * memory-mapped buffer. On read, the cached block is copied out of the mapped region into a
 * newly allocated {@link ByteBuff}, so the returned {@link Cacheable} owns its memory
 * exclusively instead of holding a reference into the shared mapping.
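 * <p>
 * As a rough usage sketch (an illustration only; the configuration keys below are standard
 * BucketCache settings defined outside this file, and the file path is hypothetical, so verify
 * both against your HBase release), an mmap-backed bucket cache is typically enabled with
 * something like:
 *
 * <pre>{@code
 * Configuration conf = HBaseConfiguration.create();
 * // The "mmap:" prefix is expected to select this engine.
 * conf.set("hbase.bucketcache.ioengine", "mmap:/cache/bucketcache.data");
 * conf.set("hbase.bucketcache.size", "4096"); // cache capacity in MB
 * }</pre>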
 */
@InterfaceAudience.Private
public class ExclusiveMemoryMmapIOEngine extends FileMmapIOEngine {

  public ExclusiveMemoryMmapIOEngine(String filePath, long capacity) throws IOException {
    super(filePath, capacity);
  }

  @Override
  public Cacheable read(BucketEntry be) throws IOException {
    // Allocate a private destination buffer sized to the cached block, using the entry's
    // allocator, so the caller never ends up holding a view over the memory-mapped region.
    ByteBuff dst = be.allocator.allocate(be.getLength());
    // Copy the block bytes from the memory-mapped buffer array into the destination buffer.
    bufferArray.read(be.offset(), dst);
    // Rewind and bound the buffer to exactly the block's length before wrapping it.
    dst.position(0).limit(be.getLength());
    return be.wrapAsCacheable(dst);
  }
}