/**
 *
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.tool;

import java.io.IOException;
import java.util.Optional;
import java.util.concurrent.atomic.AtomicLong;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.client.Mutation;
import org.apache.hadoop.hbase.coprocessor.ObserverContext;
import org.apache.hadoop.hbase.coprocessor.RegionCoprocessor;
import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
import org.apache.hadoop.hbase.coprocessor.RegionObserver;
import org.apache.hadoop.hbase.regionserver.MiniBatchOperationInProgress;
import org.apache.hadoop.hbase.regionserver.OperationStatus;
import org.apache.yetus.audience.InterfaceAudience;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

/**
 * <p>
 * This coprocessor swallows all writes. It makes it possible to test a pure
 * write workload that goes through all the communication layers.
 * Reads will work as well, but since nothing is ever written they will always
 * return an empty result. The WAL is also skipped.
 * Obviously, the region will never be split automatically; it is up to the user
 * to split and move it.
 * </p>
 * <p>
 * For a table created like this:
 * create 'usertable', {NAME =&gt; 'f1', VERSIONS =&gt; 1}
 * </p>
 * <p>
 * You can then add the coprocessor with this command:
 * alter 'usertable', 'coprocessor' =&gt; '|org.apache.hadoop.hbase.tool.WriteSinkCoprocessor|'
 * </p>
 * <p>
 * A put will then appear to succeed:
 * put 'usertable', 'f1', 'f1', 'f1'
 * </p>
 * <p>
 * but a subsequent scan:
 * scan 'usertable'
 * will return:
 * 0 row(s) in 0.0050 seconds
 * </p>
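 * <p>
 * Equivalently, the coprocessor can be attached from Java code. A minimal sketch,
 * assuming HBase 2.x, an existing {@code usertable}, and an open {@code Admin}
 * handle (the variable names are illustrative):
 * </p>
 * <pre>
 * // Rebuild the table descriptor with this coprocessor attached and apply it.
 * TableName name = TableName.valueOf("usertable");
 * TableDescriptor desc = TableDescriptorBuilder
 *     .newBuilder(admin.getDescriptor(name))
 *     .setCoprocessor(WriteSinkCoprocessor.class.getName())
 *     .build();
 * admin.modifyTable(desc);
 * </pre>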
 * TODO: It needs tests
 */
@InterfaceAudience.Private
public class WriteSinkCoprocessor implements RegionCoprocessor, RegionObserver {
  private static final Logger LOG = LoggerFactory.getLogger(WriteSinkCoprocessor.class);
  private final AtomicLong ops = new AtomicLong();

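  // Since HBase 2.0, a RegionCoprocessor must explicitly expose its RegionObserver;
  // this class implements both interfaces and returns itself.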
  @Override
  public Optional<RegionObserver> getRegionObserver() {
    return Optional.of(this);
  }

  private String regionName;

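  // Cache the region name once at open time so the periodic log line below stays cheap.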
  @Override
  public void preOpen(ObserverContext<RegionCoprocessorEnvironment> e) throws IOException {
    regionName = e.getEnvironment().getRegion().getRegionInfo().getRegionNameAsString();
  }

  @Override
  public void preBatchMutate(final ObserverContext<RegionCoprocessorEnvironment> c,
                             final MiniBatchOperationInProgress<Mutation> miniBatchOp)
      throws IOException {
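    // One increment per batch; emit a progress line every 20,000 batches.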
    final long count = ops.incrementAndGet();
    if (count % 20000 == 0) {
      LOG.info("Wrote {} times in region {}", count, regionName);
    }

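    // Mark every mutation in the batch as succeeded so the client gets a normal ack.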
    for (int i = 0; i < miniBatchOp.size(); i++) {
      miniBatchOp.setOperationStatus(i,
          new OperationStatus(HConstants.OperationStatusCode.SUCCESS));
    }
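    // Bypass the default write path entirely: nothing reaches the memstore or the WAL.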
    c.bypass();
  }
}