/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.regionserver.compactions;

import static org.apache.hadoop.hbase.regionserver.CustomTieringMultiFileWriter.CUSTOM_TIERING_TIME_RANGE;
import static org.apache.hadoop.hbase.regionserver.compactions.CustomCellTieringValueProvider.TIERING_CELL_QUALIFIER;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.fail;

import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import org.apache.hadoop.hbase.HBaseClassTestRule;
import org.apache.hadoop.hbase.HBaseTestingUtil;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.Waiter;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.regionserver.CustomTieredStoreEngine;
import org.apache.hadoop.hbase.regionserver.TimeRangeTracker;
import org.apache.hadoop.hbase.testclassification.MediumTests;
import org.apache.hadoop.hbase.testclassification.RegionServerTests;
import org.apache.hadoop.hbase.util.Bytes;
import org.junit.After;
import org.junit.Before;
import org.junit.ClassRule;
import org.junit.Test;
import org.junit.experimental.categories.Category;

@Category({ RegionServerTests.class, MediumTests.class })
public class TestCustomCellTieredCompactor {

  @ClassRule
  public static final HBaseClassTestRule CLASS_RULE =
    HBaseClassTestRule.forClass(TestCustomCellTieredCompactor.class);

  public static final byte[] FAMILY = Bytes.toBytes("cf");

  protected HBaseTestingUtil utility;

  protected Admin admin;

  @Before
  public void setUp() throws Exception {
    utility = new HBaseTestingUtil();
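    // Shorten the compacted-files discharger chore interval (in millis) so files made obsolete
    // by a compaction are cleaned up quickly and the HFile counts asserted below reflect the
    // compaction output without long waits.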
    utility.getConfiguration().setInt("hbase.hfile.compaction.discharger.interval", 10);
    utility.startMiniCluster();
  }

  @After
  public void tearDown() throws Exception {
    utility.shutdownMiniCluster();
  }

  @Test
  public void testCustomCellTieredCompactor() throws Exception {
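    // Configure the column family to use the custom tiered store engine and to derive the
    // tiering value from the "date" column qualifier rather than from the cell timestamp.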
    ColumnFamilyDescriptorBuilder clmBuilder = ColumnFamilyDescriptorBuilder.newBuilder(FAMILY);
    clmBuilder.setValue("hbase.hstore.engine.class", CustomTieredStoreEngine.class.getName());
    clmBuilder.setValue(TIERING_CELL_QUALIFIER, "date");
    TableName tableName = TableName.valueOf("testCustomCellTieredCompactor");
    TableDescriptorBuilder tblBuilder = TableDescriptorBuilder.newBuilder(tableName);
    tblBuilder.setColumnFamily(clmBuilder.build());
    utility.getAdmin().createTable(tblBuilder.build());
    utility.waitTableAvailable(tableName);
    Connection connection = utility.getConnection();
    Table table = connection.getTable(tableName);
    long recordTime = System.currentTimeMillis();
    // Write data and flush multiple store files: each batch holds one row whose "date" column is
    // roughly eleven years in the past and one row dated at recordTime, so every flushed file
    // spans the same two distinct tiering values.
    for (int i = 0; i < 6; i++) {
      List<Put> puts = new ArrayList<>(2);
      Put put = new Put(Bytes.toBytes(i));
      put.addColumn(FAMILY, Bytes.toBytes("val"), Bytes.toBytes("v" + i));
      put.addColumn(FAMILY, Bytes.toBytes("date"),
        Bytes.toBytes(recordTime - (11L * 366L * 24L * 60L * 60L * 1000L)));
      puts.add(put);
      put = new Put(Bytes.toBytes(i + 1000));
      put.addColumn(FAMILY, Bytes.toBytes("val"), Bytes.toBytes("v" + (i + 1000)));
      put.addColumn(FAMILY, Bytes.toBytes("date"), Bytes.toBytes(recordTime));
      puts.add(put);
      table.put(puts);
      utility.flush(tableName);
    }
    table.close();
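    // Trigger the first major compaction and wait until the master reports a major compaction
    // timestamp newer than the instant recorded just before the request.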
    long firstCompactionTime = System.currentTimeMillis();
    utility.getAdmin().majorCompact(tableName);
    Waiter.waitFor(utility.getConfiguration(), 5000,
      () -> utility.getMiniHBaseCluster().getMaster().getLastMajorCompactionTimestamp(tableName)
          > firstCompactionTime);
    long numHFiles = utility.getNumHFiles(tableName, FAMILY);
    // The first major compaction cannot detect more than one tier: the flushed files selected
    // for compaction do not yet expose min/max tiering values in their file info, so
    // CustomCellDateTieredCompactionPolicy cannot calculate the proper boundaries and writes a
    // single output file.
    assertEquals(1, numHFiles);
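    // The compacted file should now carry CUSTOM_TIERING_TIME_RANGE metadata spanning both
    // tiering values, from the eleven-year-old date up to recordTime.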
    utility.getMiniHBaseCluster().getRegions(tableName).get(0).getStore(FAMILY).getStorefiles()
      .forEach(file -> {
        byte[] rangeBytes = file.getMetadataValue(CUSTOM_TIERING_TIME_RANGE);
        assertNotNull(rangeBytes);
        try {
          TimeRangeTracker timeRangeTracker = TimeRangeTracker.parseFrom(rangeBytes);
          assertEquals((recordTime - (11L * 366L * 24L * 60L * 60L * 1000L)),
            timeRangeTracker.getMin());
          assertEquals(recordTime, timeRangeTracker.getMax());
        } catch (IOException e) {
          fail(e.getMessage());
        }
      });
    // Now major compact again: with the min/max tiering values available from the previous
    // compaction output, the policy should split the data into two separate files.
    long secondCompactionTime = System.currentTimeMillis();
    utility.getAdmin().majorCompact(tableName);
    Waiter.waitFor(utility.getConfiguration(), 5000,
      () -> utility.getMiniHBaseCluster().getMaster().getLastMajorCompactionTimestamp(tableName)
          > secondCompactionTime);
    numHFiles = utility.getNumHFiles(tableName, FAMILY);
    assertEquals(2, numHFiles);
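    // Each of the two resulting files should cover exactly one tiering value, so the tracked
    // range collapses to a single point (min == max).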
    utility.getMiniHBaseCluster().getRegions(tableName).get(0).getStore(FAMILY).getStorefiles()
      .forEach(file -> {
        byte[] rangeBytes = file.getMetadataValue(CUSTOM_TIERING_TIME_RANGE);
        assertNotNull(rangeBytes);
        try {
          TimeRangeTracker timeRangeTracker = TimeRangeTracker.parseFrom(rangeBytes);
          assertEquals(timeRangeTracker.getMin(), timeRangeTracker.getMax());
        } catch (IOException e) {
          fail(e.getMessage());
        }
      });
  }
}