/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.regionserver;

import static org.apache.hadoop.hbase.regionserver.HRegion.SPLIT_IGNORE_BLOCKING_ENABLED_KEY;
import static org.apache.hadoop.hbase.regionserver.Store.PRIORITY_USER;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertFalse;
import static org.junit.jupiter.api.Assertions.assertNotNull;
import static org.junit.jupiter.api.Assertions.assertTrue;

import java.io.IOException;
import java.util.List;
import org.apache.hadoop.hbase.HBaseTestingUtil;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.master.assignment.SplitTableRegionProcedure;
import org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv;
import org.apache.hadoop.hbase.procedure2.ProcedureExecutor;
import org.apache.hadoop.hbase.procedure2.ProcedureTestingUtility;
import org.apache.hadoop.hbase.testclassification.MediumTests;
import org.apache.hadoop.hbase.util.Bytes;
import org.junit.jupiter.api.AfterAll;
import org.junit.jupiter.api.BeforeAll;
import org.junit.jupiter.api.Tag;
import org.junit.jupiter.api.Test;

import org.apache.hbase.thirdparty.com.google.common.io.Closeables;

/**
 * Verifies that a region split proceeds even when a store has more files than
 * {@code hbase.hstore.blockingStoreFiles} allows, provided
 * {@code hbase.hregion.split.ignore.blocking.enabled} is set
 * ({@link HRegion#SPLIT_IGNORE_BLOCKING_ENABLED_KEY}).
 */
@Tag(MediumTests.TAG)
public class TestSplitWithBlockingFiles {

  protected static final HBaseTestingUtil UTIL = new HBaseTestingUtil();
  // Never reassigned, so declared final.
  private static final TableName TABLE_NAME = TableName.valueOf("test");
  private static final byte[] CF = Bytes.toBytes("cf");
  private static Admin ADMIN;
  private static Table TABLE;

  /**
   * Starts a single-node mini cluster configured so that flushes quickly pile up
   * "blocking" store files while splits are allowed to ignore them.
   */
  @BeforeAll
  public static void setupCluster() throws Exception {
    // Small max file size so the region becomes splittable after only a few flushes.
    UTIL.getConfiguration().setLong(HConstants.HREGION_MAX_FILESIZE, 8 * 2 * 10240L);
    // With a limit of 1, every flush beyond the first creates a "blocking" store file.
    UTIL.getConfiguration().setInt(HStore.BLOCKING_STOREFILES_KEY, 1);
    UTIL.getConfiguration().set(HConstants.HBASE_REGION_SPLIT_POLICY_KEY,
      ConstantSizeRegionSplitPolicy.class.getName());
    // The feature under test: let splits proceed despite blocking store files.
    UTIL.getConfiguration().setBoolean(SPLIT_IGNORE_BLOCKING_ENABLED_KEY, true);
    UTIL.startMiniCluster(1);
    ADMIN = UTIL.getAdmin();
    TableDescriptor td = TableDescriptorBuilder.newBuilder(TABLE_NAME)
      .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(CF).setBlocksize(1000).build())
      .build();
    TABLE = UTIL.createTable(td, null);
    UTIL.waitTableAvailable(TABLE_NAME);
  }

  @AfterAll
  public static void cleanupTest() throws Exception {
    Closeables.close(TABLE, true);
    UTIL.shutdownMiniCluster();
  }

  @Test
  public void testSplitIgnoreBlockingFiles() throws Exception {
    // Disable automatic splits while loading data so we control when the split happens.
    ADMIN.splitSwitch(false, true);
    loadTable();
    assertEquals(100, countRows(), "There should be 100 rows!");

    List<HRegion> regions = UTIL.getMiniHBaseCluster().getRegionServer(0).getRegions();
    regions.removeIf(r -> !r.getRegionInfo().getTable().equals(TABLE_NAME));
    assertEquals(1, regions.size());
    assertNotNull(regions.get(0).getSplitPolicy().getSplitPoint());
    assertTrue(regions.get(0).getCompactPriority() >= PRIORITY_USER);
    // Even with blocking store files present, the split request must be accepted.
    assertTrue(UTIL.getMiniHBaseCluster().getRegionServer(0).getCompactSplitThread()
      .requestSplit(regions.get(0)));

    // split region
    ADMIN.splitSwitch(true, true);
    MasterProcedureEnv env =
      UTIL.getMiniHBaseCluster().getMaster().getMasterProcedureExecutor().getEnvironment();
    final ProcedureExecutor<MasterProcedureEnv> executor =
      UTIL.getMiniHBaseCluster().getMaster().getMasterProcedureExecutor();
    SplitTableRegionProcedure splitProcedure =
      new SplitTableRegionProcedure(env, regions.get(0).getRegionInfo(), Bytes.toBytes("row5"));
    executor.submitProcedure(splitProcedure);
    ProcedureTestingUtility.waitProcedure(executor, splitProcedure.getProcId());

    regions = UTIL.getMiniHBaseCluster().getRegionServer(0).getRegions();
    regions.removeIf(r -> !r.getRegionInfo().getTable().equals(TABLE_NAME));
    assertEquals(2, regions.size());
    // The split must not lose any data.
    assertEquals(100, countRows(), "There should be 100 rows!");
    for (HRegion region : regions) {
      // Fresh daughter regions should neither need compaction nor accept another split.
      assertTrue(region.getCompactPriority() < PRIORITY_USER);
      assertFalse(
        UTIL.getMiniHBaseCluster().getRegionServer(0).getCompactSplitThread().requestSplit(region));
    }
  }

  /**
   * Writes 100 rows ("row00".."row99"), flushing after every 10 puts so store files
   * accumulate past the blocking limit.
   */
  private static void loadTable() throws IOException {
    byte[] value = new byte[1024];
    for (int m = 0; m < 10; m++) {
      String rowPrefix = "row" + m;
      for (int i = 0; i < 10; i++) {
        Put p = new Put(Bytes.toBytes(rowPrefix + i));
        p.addColumn(CF, Bytes.toBytes("qualifier"), value);
        p.addColumn(CF, Bytes.toBytes("qualifier2"), value);
        TABLE.put(p);
      }
      ADMIN.flush(TABLE_NAME);
    }
  }

  /**
   * Counts the rows currently visible in the test table.
   * <p>
   * Uses try-with-resources: the original code never closed its {@link ResultScanner}s,
   * leaking open scanners on the region server.
   */
  private static int countRows() throws IOException {
    try (ResultScanner results = TABLE.getScanner(new Scan())) {
      int count = 0;
      while (results.next() != null) {
        count++;
      }
      return count;
    }
  }
}