/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.wal;

import org.apache.yetus.audience.InterfaceAudience;

/**
 * Used by {@link BoundedRecoveredEditsOutputSink}. The key to limiting the number of open writers
 * is that a chunk is only returned once the buffered heap size reaches maxHeapUsage, so a writer
 * does not need to be created for every region during splitting. The returned
 * {@link EntryBuffers.RegionEntryBuffer} is written to a recovered edits file and the writer is
 * closed immediately afterwards. See
 * {@link BoundedRecoveredEditsOutputSink#append(EntryBuffers.RegionEntryBuffer)} for more details.
 */
@InterfaceAudience.Private
public class BoundedEntryBuffers extends EntryBuffers {

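  /**
   * @param controller   the WAL split pipeline controller, passed through to {@link EntryBuffers}
   * @param maxHeapUsage the heap-size threshold; buffered chunks are only handed out for writing
   *                     once the total buffered size reaches this limit
   */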
  public BoundedEntryBuffers(WALSplitter.PipelineController controller, long maxHeapUsage) {
    super(controller, maxHeapUsage);
  }

  @Override
  synchronized RegionEntryBuffer getChunkToWrite() {
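    // Hold back chunks until the buffered edits reach maxHeapUsage; this lets the bounded
    // output sink open a writer, flush the chunk, and close the writer right away instead of
    // keeping one writer open per region.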
    if (totalBuffered < maxHeapUsage) {
      return null;
    }
    return super.getChunkToWrite();
  }
}