/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.tool;

import java.io.IOException;
import java.util.Optional;
import java.util.concurrent.atomic.AtomicLong;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.client.Mutation;
import org.apache.hadoop.hbase.coprocessor.ObserverContext;
import org.apache.hadoop.hbase.coprocessor.RegionCoprocessor;
import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
import org.apache.hadoop.hbase.coprocessor.RegionObserver;
import org.apache.hadoop.hbase.regionserver.MiniBatchOperationInProgress;
import org.apache.hadoop.hbase.regionserver.OperationStatus;
import org.apache.yetus.audience.InterfaceAudience;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

/**
 * <p>
 * This coprocessor 'swallows' all the writes. It allows testing a pure write workload, going
 * through all the communication layers. Reads will work as well, but since nothing is ever
 * written, they will always return an empty result. The WAL is also skipped. Obviously, the
 * region will never be split automatically; it's up to the user to split and move it.
 * </p>
 * <p>
 * For a table created like this: create 'usertable', {NAME =&gt; 'f1', VERSIONS =&gt; 1}
 * </p>
 * <p>
 * You can then add the coprocessor with this command: alter 'usertable', 'coprocessor' =&gt;
 * '|org.apache.hadoop.hbase.tool.WriteSinkCoprocessor|'
 * </p>
 * <p>
 * And then: put 'usertable', 'f1', 'f1', 'f1'
 * </p>
 * <p>
 * A subsequent scan 'usertable' will return: 0 row(s) in 0.0050 seconds
 * </p>
 * TODO: It needs tests
 */
@InterfaceAudience.Private
public class WriteSinkCoprocessor implements RegionCoprocessor, RegionObserver {
  private static final Logger LOG = LoggerFactory.getLogger(WriteSinkCoprocessor.class);
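  // Counts intercepted batches; used only to drive the periodic progress log below.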
  private final AtomicLong ops = new AtomicLong();

  @Override
  public Optional<RegionObserver> getRegionObserver() {
    return Optional.of(this);
  }

  private String regionName;

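  // Cache the region name once at open time so the hot write path does not recompute it.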
  @Override
  public void preOpen(ObserverContext<? extends RegionCoprocessorEnvironment> e)
    throws IOException {
    regionName = e.getEnvironment().getRegion().getRegionInfo().getRegionNameAsString();
  }

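  // Intercepts every batch of mutations before it is applied and discards it.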
  @Override
  public void preBatchMutate(final ObserverContext<? extends RegionCoprocessorEnvironment> c,
    final MiniBatchOperationInProgress<Mutation> miniBatchOp) throws IOException {
    if (ops.incrementAndGet() % 20000 == 0) {
      LOG.info("Wrote {} times in region {}", ops.get(), regionName);
    }

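    // Mark every mutation in the batch as successful so the client sees a normal write.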
    for (int i = 0; i < miniBatchOp.size(); i++) {
      miniBatchOp.setOperationStatus(i,
        new OperationStatus(HConstants.OperationStatusCode.SUCCESS));
    }
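    // Bypass the default processing: nothing reaches the memstore or the WAL.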
    c.bypass();
  }
}