/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.regionserver;

import java.io.IOException;
import java.util.HashMap;
import java.util.Map;
import org.apache.hadoop.hbase.HBaseClassTestRule;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.regionserver.compactions.CompactionConfiguration;
import org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory;
import org.apache.hadoop.hbase.testclassification.RegionServerTests;
import org.apache.hadoop.hbase.testclassification.SmallTests;
import org.junit.ClassRule;
import org.junit.Test;
import org.junit.experimental.categories.Category;

@Category({ RegionServerTests.class, SmallTests.class })
public class TestDateTieredCompactionPolicyHeterogeneousStorage
  extends AbstractTestDateTieredCompactionPolicy {
  @ClassRule
  public static final HBaseClassTestRule CLASS_RULE =
    HBaseClassTestRule.forClass(TestDateTieredCompactionPolicyHeterogeneousStorage.class);

  public static final String HOT_WINDOW_SP = "ALL_SSD";
  public static final String WARM_WINDOW_SP = "ONE_SSD";
  // Note: the HDFS storage policy named "HOT" (all DISK) is what this test assigns to COLD windows.
  public static final String COLD_WINDOW_SP = "HOT";

  @Override
  protected void config() {
    super.config();

    // Set up policy
    conf.set(StoreEngine.STORE_ENGINE_CLASS_KEY,
      "org.apache.hadoop.hbase.regionserver.DateTieredStoreEngine");
    conf.setLong(CompactionConfiguration.DATE_TIERED_MAX_AGE_MILLIS_KEY, 100);
    conf.setLong(CompactionConfiguration.DATE_TIERED_INCOMING_WINDOW_MIN_KEY, 3);
    conf.setLong(ExponentialCompactionWindowFactory.BASE_WINDOW_MILLIS_KEY, 6);
    conf.setInt(ExponentialCompactionWindowFactory.WINDOWS_PER_TIER_KEY, 4);
    conf.setBoolean(CompactionConfiguration.DATE_TIERED_SINGLE_OUTPUT_FOR_MINOR_COMPACTION_KEY,
      false);

    // Special settings for the per-window compaction policy
    this.conf.setInt(CompactionConfiguration.HBASE_HSTORE_COMPACTION_MIN_KEY, 2);
    this.conf.setInt(CompactionConfiguration.HBASE_HSTORE_COMPACTION_MAX_KEY, 12);
    this.conf.setFloat(CompactionConfiguration.HBASE_HSTORE_COMPACTION_RATIO_KEY, 1.2F);

    conf.setInt(HStore.BLOCKING_STOREFILES_KEY, 20);
    conf.setLong(HConstants.MAJOR_COMPACTION_PERIOD, 5);
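
    // With the hot age (6 ms) and warm age (12 ms) configured below, a window whose start
    // ts >= now - hot age is HOT, a window with now - hot age > start ts >= now - warm age is
    // WARM, and any older window is COLD. The tests below exercise each of the three cases.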
    // Set the storage policy for each type of window
    conf.setBoolean(CompactionConfiguration.DATE_TIERED_STORAGE_POLICY_ENABLE_KEY, true);
    conf.setLong(CompactionConfiguration.DATE_TIERED_HOT_WINDOW_AGE_MILLIS_KEY, 6);
    conf.set(CompactionConfiguration.DATE_TIERED_HOT_WINDOW_STORAGE_POLICY_KEY, HOT_WINDOW_SP);
    conf.setLong(CompactionConfiguration.DATE_TIERED_WARM_WINDOW_AGE_MILLIS_KEY, 12);
    conf.set(CompactionConfiguration.DATE_TIERED_WARM_WINDOW_STORAGE_POLICY_KEY, WARM_WINDOW_SP);
    conf.set(CompactionConfiguration.DATE_TIERED_COLD_WINDOW_STORAGE_POLICY_KEY, COLD_WINDOW_SP);
  }

  /**
   * Test minor compaction of the incoming window. The incoming window's start ts >= now - hot age,
   * so it is a HOT window and uses HOT_WINDOW_SP.
   * @throws IOException if an error occurs
   */
  @Test
  public void testIncomingWindowHot() throws IOException {
    long[] minTimestamps = new long[] { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
    long[] maxTimestamps = new long[] { 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15 };
    long[] sizes = new long[] { 30, 31, 32, 33, 34, 20, 21, 22, 23, 24, 25, 10, 11, 12, 13 };
    Map<Long, String> expected = new HashMap<>();
    // expected DateTieredCompactionRequest boundaries = { Long.MIN_VALUE, 12 }
    // now = 16, so the incoming window [12, 18) starts at 12 >= 16 - 6 (hot age): HOT
    expected.put(12L, HOT_WINDOW_SP);
    compactEqualsStoragePolicy(16, sfCreate(minTimestamps, maxTimestamps, sizes), expected, false,
      true);
  }

  /**
   * Test a non-incoming window. now - hot age > window start >= now - warm age, so it is a WARM
   * window and uses WARM_WINDOW_SP.
   * @throws IOException if an error occurs
   */
  @Test
  public void testNotIncomingWindowWarm() throws IOException {
    long[] minTimestamps = new long[] { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
    long[] maxTimestamps = new long[] { 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13 };
    long[] sizes = new long[] { 30, 31, 32, 33, 34, 20, 21, 22, 23, 24, 25, 10, 11 };
    Map<Long, String> expected = new HashMap<>();
    // expected DateTieredCompactionRequest boundaries = { Long.MIN_VALUE, 6 }
    // now = 16, so the window [6, 12) satisfies 16 - 6 > 6 >= 16 - 12: WARM
    expected.put(6L, WARM_WINDOW_SP);
    compactEqualsStoragePolicy(16, sfCreate(minTimestamps, maxTimestamps, sizes), expected, false,
      true);
  }

  /**
   * Test a non-incoming window whose start ts >= now - hot age, so it is still a HOT window and
   * uses HOT_WINDOW_SP.
   * @throws IOException if an error occurs
   */
  @Test
  public void testNotIncomingWindowAndIsHot() throws IOException {
    long[] minTimestamps = new long[] { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
    long[] maxTimestamps = new long[] { 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13 };
    long[] sizes = new long[] { 30, 31, 32, 33, 34, 20, 21, 22, 23, 24, 25, 10, 11 };
    Map<Long, String> expected = new HashMap<>();
    // expected DateTieredCompactionRequest boundaries = { Long.MIN_VALUE, 6 }
    // now = 12, so the window [6, 12) starts at 6 >= 12 - 6 (hot age): HOT
    expected.put(6L, HOT_WINDOW_SP);
    compactEqualsStoragePolicy(12, sfCreate(minTimestamps, maxTimestamps, sizes), expected, false,
      true);
  }

  /**
   * Test a non-incoming window whose start timestamp < now - warm age, so it is a COLD window and
   * uses COLD_WINDOW_SP.
   * @throws IOException if an error occurs
   */
  @Test
  public void testColdWindow() throws IOException {
    long[] minTimestamps = new long[] { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
    long[] maxTimestamps = new long[] { 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12 };
    long[] sizes = new long[] { 30, 31, 32, 33, 34, 20, 21, 22, 23, 24, 25, 10 };
    Map<Long, String> expected = new HashMap<>();
    // expected DateTieredCompactionRequest boundaries = { Long.MIN_VALUE, 6 }
    // now = 22, so the window [6, 12) starts at 6 < 22 - 12 (warm age): COLD
    expected.put(6L, COLD_WINDOW_SP);
    compactEqualsStoragePolicy(22, sfCreate(minTimestamps, maxTimestamps, sizes), expected, false,
      true);
  }

  /**
   * Test a non-incoming window in which not all HFiles are selected for compaction. Exploring
   * logic is applied to the non-incoming window and leaves more than one HFile out of the
   * selection, so the minor compaction uses a single output file and the boundaries contain only
   * Long.MIN_VALUE.
   * @throws IOException if an error occurs
   */
  @Test
  public void testRatioT0() throws IOException {
    long[] minTimestamps = new long[] { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
    long[] maxTimestamps = new long[] { 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12 };
    long[] sizes = new long[] { 30, 31, 32, 33, 34, 20, 21, 22, 280, 23, 24, 1 };
    Map<Long, String> expected = new HashMap<>();
    // window start = 6, expected DateTieredCompactionRequest boundaries = { Long.MIN_VALUE }
    // the outsized 280 file cannot satisfy the 1.2 compaction ratio, so exploring trims the
    // selection
    expected.put(Long.MIN_VALUE, WARM_WINDOW_SP);
    compactEqualsStoragePolicy(16, sfCreate(minTimestamps, maxTimestamps, sizes), expected, false,
      true);
  }
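
  /*
   * Window layout for the major compaction test below (now = 161, 6 ms base windows, 4 windows
   * per tier): the expected output boundaries are { Long.MIN_VALUE, 24, 48, 72, 96, 120, 144,
   * 150, 156 }. With a hot age of 6 ms the window starting at 156 is HOT, with a warm age of
   * 12 ms the window starting at 150 is WARM, and every earlier window is COLD.
   */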
  /**
   * Test major compaction. It compacts all files and creates multiple output files, each carrying
   * the storage policy of its window.
   * @throws IOException if an error occurs
   */
  @Test
  public void testMajorCompaction() throws IOException {
    long[] minTimestamps = new long[] { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
    long[] maxTimestamps = new long[] { 44, 60, 61, 96, 100, 104, 105, 106, 113, 145, 157 };
    long[] sizes = new long[] { 0, 50, 51, 40, 41, 42, 33, 30, 31, 2, 1 };
    Map<Long, String> expected = new HashMap<>();
    expected.put(Long.MIN_VALUE, COLD_WINDOW_SP);
    expected.put(24L, COLD_WINDOW_SP);
    expected.put(48L, COLD_WINDOW_SP);
    expected.put(72L, COLD_WINDOW_SP);
    expected.put(96L, COLD_WINDOW_SP);
    expected.put(120L, COLD_WINDOW_SP);
    expected.put(144L, COLD_WINDOW_SP);
    expected.put(150L, WARM_WINDOW_SP);
    expected.put(156L, HOT_WINDOW_SP);
    compactEquals(161, sfCreate(minTimestamps, maxTimestamps, sizes),
      new long[] { 0, 50, 51, 40, 41, 42, 33, 30, 31, 2, 1 },
      new long[] { Long.MIN_VALUE, 24, 48, 72, 96, 120, 144, 150, 156 }, true, true);
    compactEqualsStoragePolicy(161, sfCreate(minTimestamps, maxTimestamps, sizes), expected, true,
      true);
  }
}