/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.wal;

import org.apache.yetus.audience.InterfaceAudience;

/**
 * Used for {@link BoundedRecoveredEditsOutputSink}. The key to limiting the number of open
 * writers is that {@link #getChunkToWrite()} returns a chunk only once the buffered heap size
 * reaches maxHeapUsage, so a writer does not have to be created for every region during
 * splitting. The returned {@link EntryBuffers.RegionEntryBuffer} is written to a recovered
 * edits file and the writer is closed immediately afterwards.
 * See {@link BoundedRecoveredEditsOutputSink#append(EntryBuffers.RegionEntryBuffer)} for more
 * details.
 */
@InterfaceAudience.Private
public class BoundedEntryBuffers extends EntryBuffers {

  public BoundedEntryBuffers(WALSplitter.PipelineController controller, long maxHeapUsage) {
    super(controller, maxHeapUsage);
  }

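  /**
   * Return null until the total buffered size reaches maxHeapUsage, so that a recovered edits
   * writer is only opened once there is a full chunk of edits ready to be flushed.
   */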
  @Override
  synchronized RegionEntryBuffer getChunkToWrite() {
    if (totalBuffered < maxHeapUsage) {
      return null;
    }
    return super.getChunkToWrite();
  }
}