/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.coprocessor.example;

import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertTrue;

import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.regionserver.HRegion;
import org.apache.hadoop.hbase.regionserver.HStore;
import org.apache.hadoop.hbase.testclassification.CoprocessorTests;
import org.apache.hadoop.hbase.testclassification.MediumTests;
import org.junit.jupiter.api.AfterAll;
import org.junit.jupiter.api.BeforeAll;
import org.junit.jupiter.api.Tag;
import org.junit.jupiter.api.Test;

@Tag(CoprocessorTests.TAG)
@Tag(MediumTests.TAG)
public class TestWriteHeavyIncrementObserver extends WriteHeavyIncrementObserverTestBase {

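  // Create the test table with the WriteHeavyIncrementObserver coprocessor attached, so that
  // increments are written as delta cells and summed back on read, flush and compaction.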
  @BeforeAll
  public static void setUp() throws Exception {
    WriteHeavyIncrementObserverTestBase.setUp();
    UTIL.getAdmin()
      .createTable(TableDescriptorBuilder.newBuilder(NAME)
        .setCoprocessor(WriteHeavyIncrementObserver.class.getName())
        .setColumnFamily(ColumnFamilyDescriptorBuilder.of(FAMILY)).build());
    TABLE = UTIL.getConnection().getTable(NAME);
  }

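  // Close the test table and shut down the mini cluster started by the base class.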
  @AfterAll
  public static void tearDown() throws Exception {
    if (TABLE != null) {
      TABLE.close();
    }
    UTIL.shutdownMiniCluster();
  }

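  // Run the increments, verify the summed result, and then check that flush plus major
  // compaction collapses the per-increment delta cells written by the observer.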
  @Test
  public void test() throws Exception {
    doIncrement(0);
    assertSum();
    // scan operations are not intercepted by the observer, so a scan still returns the original
    // values that were added to the table.
    try (ResultScanner scanner = TABLE.getScanner(new Scan().withStartRow(ROW)
      .withStopRow(ROW, true).addFamily(FAMILY).readAllVersions().setAllowPartialResults(true))) {
      Result r = scanner.next();
      assertTrue(r.rawCells().length > 2);
    }
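    // Flush the memstore and major compact so the observer's compaction hook can merge the
    // delta cells into a single summed cell per column.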
    UTIL.flush(NAME);
    HRegion region = UTIL.getHBaseCluster().findRegionsForTable(NAME).get(0);
    HStore store = region.getStore(FAMILY);
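    // Keep requesting major compactions until the store is down to a single store file.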
    for (;;) {
      region.compact(true);
      if (store.getStorefilesCount() == 1) {
        break;
      }
      // back off briefly instead of retrying the compaction in a tight loop
      Thread.sleep(100);
    }
    assertSum();
    // Should only have two cells after flush and major compaction
    try (ResultScanner scanner = TABLE.getScanner(new Scan().withStartRow(ROW)
      .withStopRow(ROW, true).addFamily(FAMILY).readAllVersions().setAllowPartialResults(true))) {
      Result r = scanner.next();
      assertEquals(2, r.rawCells().length);
    }
  }
}