/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.regionserver;

import static org.junit.Assert.assertEquals;
import static org.junit.Assert.fail;

import java.io.IOException;
import java.util.List;
import java.util.concurrent.ThreadPoolExecutor;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseClassTestRule;
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.executor.ExecutorType;
import org.apache.hadoop.hbase.testclassification.MediumTests;
import org.apache.hadoop.hbase.testclassification.RegionServerTests;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.CommonFSUtils;
import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
import org.junit.AfterClass;
import org.junit.BeforeClass;
import org.junit.ClassRule;
import org.junit.Rule;
import org.junit.Test;
import org.junit.experimental.categories.Category;
import org.junit.rules.TestName;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

@Category({ MediumTests.class, RegionServerTests.class })
public class TestRegionOpen {

  @ClassRule
  public static final HBaseClassTestRule CLASS_RULE =
    HBaseClassTestRule.forClass(TestRegionOpen.class);

  private static final Logger LOG = LoggerFactory.getLogger(TestRegionOpen.class);
  private static final int NB_SERVERS = 1;

  private static final HBaseTestingUtility HTU = new HBaseTestingUtility();

  @Rule
  public TestName name = new TestName();

  @BeforeClass
  public static void before() throws Exception {
    HTU.startMiniCluster(NB_SERVERS);
  }

  @AfterClass
  public static void afterClass() throws Exception {
    HTU.shutdownMiniCluster();
  }

  private static HRegionServer getRS() {
    return HTU.getHBaseCluster().getLiveRegionServerThreads().get(0).getRegionServer();
  }

  // Regions of a table marked HIGH_QOS must be opened on the dedicated
  // RS_OPEN_PRIORITY_REGION thread pool rather than the regular region-open pool.
  @Test
  public void testPriorityRegionIsOpenedWithSeparateThreadPool() throws Exception {
    final TableName tableName = TableName.valueOf(TestRegionOpen.class.getSimpleName());
    ThreadPoolExecutor exec =
      getRS().getExecutorService().getExecutorThreadPool(ExecutorType.RS_OPEN_PRIORITY_REGION);
    long completed = exec.getCompletedTaskCount();

    HTableDescriptor htd = new HTableDescriptor(tableName);
    htd.setPriority(HConstants.HIGH_QOS);
    htd.addFamily(new HColumnDescriptor(HConstants.CATALOG_FAMILY));
    try (Connection connection = ConnectionFactory.createConnection(HTU.getConfiguration());
      Admin admin = connection.getAdmin()) {
      admin.createTable(htd);
    }

    // The single region of the high-priority table should have been opened on the
    // priority pool, so its completed task count increases by exactly one.
    assertEquals(completed + 1, exec.getCompletedTaskCount());
  }

  // Loading region info from a region directory that has no .regioninfo file must throw
  // an IOException, and the table must still have only its one original region online.
  @Test
  public void testNonExistentRegionReplica() throws Exception {
    final TableName tableName = TableName.valueOf(name.getMethodName());
    final byte[] FAMILYNAME = Bytes.toBytes("fam");
    FileSystem fs = HTU.getTestFileSystem();
    Admin admin = HTU.getAdmin();
    Configuration conf = HTU.getConfiguration();
    Path rootDir = HTU.getDataTestDirOnTestFS();

    HTableDescriptor htd = new HTableDescriptor(tableName);
    htd.addFamily(new HColumnDescriptor(FAMILYNAME));
    admin.createTable(htd);
    HTU.waitUntilNoRegionsInTransition(60000);

    // Create new HRI with non-default region replica id
    HRegionInfo hri = new HRegionInfo(htd.getTableName(), Bytes.toBytes("A"), Bytes.toBytes("B"),
      false, EnvironmentEdgeManager.currentTime(), 2);
    HRegionFileSystem regionFs = HRegionFileSystem.createRegionOnFileSystem(conf, fs,
      CommonFSUtils.getTableDir(rootDir, hri.getTable()), hri);
    Path regionDir = regionFs.getRegionDir();
    try {
      HRegionFileSystem.loadRegionInfoFileContent(fs, regionDir);
    } catch (IOException e) {
      LOG.info("Caught expected IOE due to missing .regioninfo file: " + e.getMessage()
        + ", skipping region open.");
      // We should only have 1 region online
      List<HRegionInfo> regions = admin.getTableRegions(tableName);
      LOG.info("Regions: " + regions);
      if (regions.size() != 1) {
        fail("Table " + tableName + " should have only one region, but got more: " + regions);
      }
      return;
    }
    fail("Should have thrown IOE when attempting to open a non-existing region.");
  }
}