001/** 002 * Licensed to the Apache Software Foundation (ASF) under one 003 * or more contributor license agreements. See the NOTICE file 004 * distributed with this work for additional information 005 * regarding copyright ownership. The ASF licenses this file 006 * to you under the Apache License, Version 2.0 (the 007 * "License"); you may not use this file except in compliance 008 * with the License. You may obtain a copy of the License at 009 * 010 * http://www.apache.org/licenses/LICENSE-2.0 011 * 012 * Unless required by applicable law or agreed to in writing, software 013 * distributed under the License is distributed on an "AS IS" BASIS, 014 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 015 * See the License for the specific language governing permissions and 016 * limitations under the License. 017 */ 018package org.apache.hadoop.hbase.coprocessor.example; 019 020import static org.junit.Assert.assertEquals; 021import static org.junit.Assert.fail; 022 023import java.io.IOException; 024import java.util.ArrayList; 025import java.util.List; 026import org.apache.hadoop.conf.Configuration; 027import org.apache.hadoop.fs.FileSystem; 028import org.apache.hadoop.fs.Path; 029import org.apache.hadoop.hbase.HBaseClassTestRule; 030import org.apache.hadoop.hbase.HBaseTestingUtility; 031import org.apache.hadoop.hbase.HConstants; 032import org.apache.hadoop.hbase.MiniHBaseCluster; 033import org.apache.hadoop.hbase.TableName; 034import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor; 035import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder; 036import org.apache.hadoop.hbase.client.RegionInfo; 037import org.apache.hadoop.hbase.client.RetriesExhaustedException; 038import org.apache.hadoop.hbase.client.Table; 039import org.apache.hadoop.hbase.client.TableDescriptor; 040import org.apache.hadoop.hbase.client.example.RefreshHFilesClient; 041import org.apache.hadoop.hbase.coprocessor.CoprocessorHost; 042import 
org.apache.hadoop.hbase.master.MasterFileSystem;
import org.apache.hadoop.hbase.regionserver.HRegion;
import org.apache.hadoop.hbase.regionserver.HStore;
import org.apache.hadoop.hbase.regionserver.Region;
import org.apache.hadoop.hbase.regionserver.RegionServerServices;
import org.apache.hadoop.hbase.testclassification.MediumTests;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.FSUtils;
import org.apache.hadoop.hbase.util.HFileTestUtil;
import org.apache.hadoop.hbase.wal.WAL;
import org.junit.After;
import org.junit.ClassRule;
import org.junit.Test;
import org.junit.experimental.categories.Category;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

/**
 * Tests for the {@code RefreshHFilesEndpoint} coprocessor: HFiles dropped directly into a
 * region's family directory become visible after invoking the endpoint, and a failure inside
 * {@code refreshStoreFiles()} surfaces to the client as an {@link IOException}.
 */
@Category(MediumTests.class)
public class TestRefreshHFilesEndpoint {

  @ClassRule
  public static final HBaseClassTestRule CLASS_RULE =
    HBaseClassTestRule.forClass(TestRefreshHFilesEndpoint.class);

  private static final Logger LOG = LoggerFactory.getLogger(TestRefreshHFilesEndpoint.class);
  private static final HBaseTestingUtility HTU = new HBaseTestingUtility();
  private static final int NUM_MASTER = 1;
  private static final int NUM_RS = 2;
  private static final TableName TABLE_NAME = TableName.valueOf("testRefreshRegionHFilesEP");
  private static final byte[] FAMILY = Bytes.toBytes("family");
  private static final byte[] QUALIFIER = Bytes.toBytes("qualifier");
  // Single split key -> two regions, spread across the two region servers.
  private static final byte[][] SPLIT_KEY = new byte[][] { Bytes.toBytes("30") };
  private static final int NUM_ROWS = 5;
  private static final String HFILE_NAME = "123abcdef";

  private static Configuration CONF = HTU.getConfiguration();
  private static MiniHBaseCluster cluster;
  private static Table table;

  /**
   * Starts a mini cluster whose regions use the given {@link Region} implementation, registers
   * the RefreshHFiles endpoint coprocessor, and creates/populates the test table.
   *
   * @param regionImpl fully-qualified class name installed as {@link HConstants#REGION_IMPL}
   */
  public static void setUp(String regionImpl) {
    try {
      CONF.set(HConstants.REGION_IMPL, regionImpl);
      // Keep client retries low so the failure-injection test finishes quickly.
      CONF.setInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER, 2);
      CONF.setStrings(CoprocessorHost.REGION_COPROCESSOR_CONF_KEY,
        RefreshHFilesEndpoint.class.getName());
      cluster = HTU.startMiniCluster(NUM_MASTER, NUM_RS);

      // Create table
      table = HTU.createTable(TABLE_NAME, FAMILY, SPLIT_KEY);

      // this will create 2 regions spread across slaves
      HTU.loadNumericRows(table, FAMILY, 1, 20);
      HTU.flush(TABLE_NAME);
    } catch (Exception ex) {
      LOG.error("Couldn't finish setup", ex);
      // Fail fast: previously the exception was swallowed and the test proceeded against a
      // half-initialized cluster, failing later with a misleading error.
      throw new RuntimeException("Couldn't finish setup", ex);
    }
  }

  @After
  public void tearDown() throws Exception {
    HTU.shutdownMiniCluster();
  }

  /**
   * Drops one extra HFile into each region's family directory (bypassing the region, so it is
   * initially invisible) and verifies the endpoint makes them visible: the HFile count for the
   * family goes from 2 to 4.
   */
  @Test
  public void testRefreshRegionHFilesEndpoint() throws Exception {
    setUp(HRegion.class.getName());
    MasterFileSystem mfs = HTU.getMiniHBaseCluster().getMaster().getMasterFileSystem();
    Path tableDir = FSUtils.getTableDir(mfs.getRootDir(), TABLE_NAME);
    for (Region region : cluster.getRegions(TABLE_NAME)) {
      Path regionDir = new Path(tableDir, region.getRegionInfo().getEncodedName());
      Path familyDir = new Path(regionDir, Bytes.toString(FAMILY));
      HFileTestUtil.createHFile(HTU.getConfiguration(), HTU.getTestFileSystem(),
        new Path(familyDir, HFILE_NAME), FAMILY, QUALIFIER, Bytes.toBytes("50"),
        Bytes.toBytes("60"), NUM_ROWS);
    }
    assertEquals(2, HTU.getNumHFiles(TABLE_NAME, FAMILY));
    callRefreshRegionHFilesEndPoint();
    assertEquals(4, HTU.getNumHFiles(TABLE_NAME, FAMILY));
  }

  /**
   * Uses a region implementation whose store always throws from {@code refreshStoreFiles()} and
   * expects the failure to reach the client as an {@link IOException}.
   */
  @Test(expected = IOException.class)
  public void testRefreshRegionHFilesEndpointWithException() throws IOException {
    setUp(HRegionForRefreshHFilesEP.class.getName());
    callRefreshRegionHFilesEndPoint();
  }

  /**
   * Invokes the RefreshHFiles endpoint on every region of the test table.
   *
   * @throws IOException if the endpoint fails with an {@link IOException} after retries
   */
  private void callRefreshRegionHFilesEndPoint() throws IOException {
    // The client holds a connection; try-with-resources releases it even on failure
    // (previously it was never closed).
    try (RefreshHFilesClient refreshHFilesClient = new RefreshHFilesClient(CONF)) {
      refreshHFilesClient.refreshHFiles(TABLE_NAME);
    } catch (RetriesExhaustedException rex) {
      if (rex.getCause() instanceof IOException) {
        // Preserve the cause so a failing test shows the server-side stack trace
        // (previously a bare IOException was thrown, discarding it).
        throw new IOException(rex.getCause());
      }
    } catch (Throwable ex) {
      LOG.error(ex.toString(), ex);
      fail("Couldn't call the RefreshRegionHFilesEndpoint");
    }
  }

  /**
   * {@link HRegion} variant that injects an {@link HStoreWithFaultyRefreshHFilesAPI} at the head
   * of {@link #getStores()}, so any attempt to refresh the region's stores fails.
   */
  public static class HRegionForRefreshHFilesEP extends HRegion {
    HStoreWithFaultyRefreshHFilesAPI store;

    public HRegionForRefreshHFilesEP(final Path tableDir, final WAL wal, final FileSystem fs,
        final Configuration confParam, final RegionInfo regionInfo,
        final TableDescriptor htd, final RegionServerServices rsServices) {
      super(tableDir, wal, fs, confParam, regionInfo, htd, rsServices);
    }

    @Override
    public List<HStore> getStores() {
      List<HStore> list = new ArrayList<>(stores.size());
      /**
       * This is used to trigger the custom definition (faulty)
       * of refresh HFiles API.
       */
      try {
        // Lazily create the faulty store once; it is prepended so the endpoint's refresh
        // loop hits it first and the injected IOException propagates to the client.
        if (this.store == null) {
          store = new HStoreWithFaultyRefreshHFilesAPI(this,
            ColumnFamilyDescriptorBuilder.of(FAMILY), this.conf);
        }
        list.add(store);
      } catch (IOException ioe) {
        LOG.info("Couldn't instantiate custom store implementation", ioe);
      }

      list.addAll(stores.values());
      return list;
    }
  }

  /**
   * {@link HStore} whose {@link #refreshStoreFiles()} always fails, simulating a store that
   * cannot pick up newly added files.
   */
  public static class HStoreWithFaultyRefreshHFilesAPI extends HStore {
    public HStoreWithFaultyRefreshHFilesAPI(final HRegion region,
        final ColumnFamilyDescriptor family, final Configuration confParam) throws IOException {
      super(region, family, confParam);
    }

    @Override
    public void refreshStoreFiles() throws IOException {
      // Intentionally bare: callers only check the exception type, not its message.
      throw new IOException();
    }
  }
}