/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.regionserver;

import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;

import java.io.IOException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.CellUtil;
import org.apache.hadoop.hbase.HBaseClassTestRule;
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.Stoppable;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Durability;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.RegionInfo;
import org.apache.hadoop.hbase.client.RegionInfoBuilder;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.testclassification.MediumTests;
import org.apache.hadoop.hbase.testclassification.RegionServerTests;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.CommonFSUtils;
import org.apache.hadoop.hbase.util.StoppableImplementation;
import org.apache.hadoop.hbase.wal.WALFactory;
import org.junit.Before;
import org.junit.ClassRule;
import org.junit.Rule;
import org.junit.Test;
import org.junit.experimental.categories.Category;
import org.junit.rules.TestName;

/**
 * Tests for {@link StorefileRefresherChore}: a secondary region replica should see new store
 * files flushed by the primary after the chore runs, keep serving its last refreshed view when a
 * refresh attempt fails, and fail reads once the replica is considered stale.
 */
@Category({ RegionServerTests.class, MediumTests.class })
public class TestStoreFileRefresherChore {

  @ClassRule
  public static final HBaseClassTestRule CLASS_RULE =
    HBaseClassTestRule.forClass(TestStoreFileRefresherChore.class);

  private HBaseTestingUtility TEST_UTIL;
  private Path testDir;

  @Rule
  public TestName name = new TestName();

  @Before
  public void setUp() throws IOException {
    TEST_UTIL = new HBaseTestingUtility();
    testDir = TEST_UTIL.getDataTestDir("TestStoreFileRefresherChore");
    CommonFSUtils.setRootDir(TEST_UTIL.getConfiguration(), testDir);
  }
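
  /**
   * Builds a table descriptor with the given region replication; every column family is set to
   * retain all versions.
   */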
  private TableDescriptor getTableDesc(TableName tableName, int regionReplication,
    byte[]... families) {
    TableDescriptorBuilder builder =
      TableDescriptorBuilder.newBuilder(tableName).setRegionReplication(regionReplication);
    Arrays.stream(families).map(family -> ColumnFamilyDescriptorBuilder.newBuilder(family)
      .setMaxVersions(Integer.MAX_VALUE).build()).forEachOrdered(builder::setColumnFamily);
    return builder.build();
  }

  /** Region file system whose store file listing can be toggled to fail, simulating FS errors. */
  static class FailingHRegionFileSystem extends HRegionFileSystem {
    boolean fail = false;

    FailingHRegionFileSystem(Configuration conf, FileSystem fs, Path tableDir,
      RegionInfo regionInfo) {
      super(conf, fs, tableDir, regionInfo);
    }

    @Override
    public List<StoreFileInfo> getStoreFiles(String familyName) throws IOException {
      if (fail) {
        throw new IOException("simulating FS failure");
      }
      return super.getStoreFiles(familyName);
    }
  }

  private HRegion initHRegion(TableDescriptor htd, byte[] startKey, byte[] stopKey, int replicaId)
    throws IOException {
    Configuration conf = TEST_UTIL.getConfiguration();
    Path tableDir = CommonFSUtils.getTableDir(testDir, htd.getTableName());

    RegionInfo info = RegionInfoBuilder.newBuilder(htd.getTableName()).setStartKey(startKey)
      .setEndKey(stopKey).setRegionId(0L).setReplicaId(replicaId).build();
    HRegionFileSystem fs =
      new FailingHRegionFileSystem(conf, tableDir.getFileSystem(conf), tableDir, info);
    final Configuration walConf = new Configuration(conf);
    CommonFSUtils.setRootDir(walConf, tableDir);
    final WALFactory wals = new WALFactory(walConf, "log_" + replicaId);
    ChunkCreator.initialize(MemStoreLAB.CHUNK_SIZE_DEFAULT, false, 0, 0, 0, null,
      MemStoreLAB.INDEX_CHUNK_SIZE_PERCENTAGE_DEFAULT);
    HRegion region = new HRegion(fs, wals.getWAL(info), conf, htd, null);

    region.initialize();

    return region;
  }

  private void putData(Region region, int startRow, int numRows, byte[] qf, byte[]... families)
    throws IOException {
    for (int i = startRow; i < startRow + numRows; i++) {
      Put put = new Put(Bytes.toBytes("" + i));
      put.setDurability(Durability.SKIP_WAL);
      for (byte[] family : families) {
        put.addColumn(family, qf, null);
      }
      region.put(put);
    }
  }

  private void verifyDataExpectFail(Region newReg, int startRow, int numRows, byte[] qf,
    byte[]... families) throws IOException {
    boolean threw = false;
    try {
      verifyData(newReg, startRow, numRows, qf, families);
    } catch (AssertionError e) {
      threw = true;
    }
    if (!threw) {
      fail("Expected data verification to fail");
    }
  }
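
  /** Asserts that each row in [startRow, startRow + numRows) has exactly one cell per family. */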
  private void verifyData(Region newReg, int startRow, int numRows, byte[] qf, byte[]... families)
    throws IOException {
    for (int i = startRow; i < startRow + numRows; i++) {
      byte[] row = Bytes.toBytes("" + i);
      Get get = new Get(row);
      for (byte[] family : families) {
        get.addColumn(family, qf);
      }
      Result result = newReg.get(get);
      Cell[] raw = result.rawCells();
      assertEquals(families.length, result.size());
      for (int j = 0; j < families.length; j++) {
        assertTrue(CellUtil.matchingRows(raw[j], row));
        assertTrue(CellUtil.matchingFamily(raw[j], families[j]));
        assertTrue(CellUtil.matchingQualifier(raw[j], qf));
      }
    }
  }

  static class StaleStorefileRefresherChore extends StorefileRefresherChore {
    boolean isStale = false;

    public StaleStorefileRefresherChore(int period, HRegionServer regionServer,
      Stoppable stoppable) {
      super(period, false, regionServer, stoppable);
    }

    @Override
    protected boolean isRegionStale(String encodedName, long time) {
      return isStale;
    }
  }

  @Test
  public void testIsStale() throws IOException {
    int period = 0;
    byte[][] families = new byte[][] { Bytes.toBytes("cf") };
    byte[] qf = Bytes.toBytes("cq");

    HRegionServer regionServer = mock(HRegionServer.class);
    List<HRegion> regions = new ArrayList<>();
    when(regionServer.getOnlineRegionsLocalContext()).thenReturn(regions);
    when(regionServer.getConfiguration()).thenReturn(TEST_UTIL.getConfiguration());

    TableDescriptor htd = getTableDesc(TableName.valueOf(name.getMethodName()), 2, families);
    HRegion primary = initHRegion(htd, HConstants.EMPTY_START_ROW, HConstants.EMPTY_END_ROW, 0);
    HRegion replica1 = initHRegion(htd, HConstants.EMPTY_START_ROW, HConstants.EMPTY_END_ROW, 1);
    regions.add(primary);
    regions.add(replica1);

    StaleStorefileRefresherChore chore =
      new StaleStorefileRefresherChore(period, regionServer, new StoppableImplementation());

    // write some data to primary and flush
    putData(primary, 0, 100, qf, families);
    primary.flush(true);
    verifyData(primary, 0, 100, qf, families);

    verifyDataExpectFail(replica1, 0, 100, qf, families);
    chore.chore();
    verifyData(replica1, 0, 100, qf, families);

    // simulate an fs failure where we cannot refresh the store files for the replica
    ((FailingHRegionFileSystem) replica1.getRegionFileSystem()).fail = true;

    // write some more data to primary and flush
    putData(primary, 100, 100, qf, families);
    primary.flush(true);
    verifyData(primary, 0, 200, qf, families);

    chore.chore(); // should not throw ex, but we cannot refresh the store files

    verifyData(replica1, 0, 100, qf, families);
    verifyDataExpectFail(replica1, 100, 100, qf, families);

    chore.isStale = true;
    chore.chore(); // now after this, we cannot read back any value
    try {
      verifyData(replica1, 0, 100, qf, families);
      fail("should have failed with IOException");
    } catch (IOException ex) {
      // expected
    }
  }
}