/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.regionserver;

import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;

import java.io.IOException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collection;
import java.util.List;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.CellUtil;
import org.apache.hadoop.hbase.HBaseClassTestRule;
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.Stoppable;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Durability;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.RegionInfo;
import org.apache.hadoop.hbase.client.RegionInfoBuilder;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.testclassification.MediumTests;
import org.apache.hadoop.hbase.testclassification.RegionServerTests;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.CommonFSUtils;
import org.apache.hadoop.hbase.util.StoppableImplementation;
import org.apache.hadoop.hbase.wal.WALFactory;
import org.junit.Before;
import org.junit.ClassRule;
import org.junit.Rule;
import org.junit.Test;
import org.junit.experimental.categories.Category;
import org.junit.rules.TestName;

/**
 * Tests that {@link StorefileRefresherChore} picks up store files flushed by the primary
 * region so that secondary replicas can serve the data, and that reads against a replica
 * fail once the chore marks it stale.
 */
@Category({RegionServerTests.class, MediumTests.class})
public class TestStoreFileRefresherChore {

  @ClassRule
  public static final HBaseClassTestRule CLASS_RULE =
    HBaseClassTestRule.forClass(TestStoreFileRefresherChore.class);

  private HBaseTestingUtility TEST_UTIL;
  private Path testDir;

  @Rule
  public TestName name = new TestName();

  @Before
  public void setUp() throws IOException {
    TEST_UTIL = new HBaseTestingUtility();
    testDir = TEST_UTIL.getDataTestDir("TestStoreFileRefresherChore");
    CommonFSUtils.setRootDir(TEST_UTIL.getConfiguration(), testDir);
  }
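  // Builds a table descriptor with the requested region replication and one column family
  // per name; each family keeps all cell versions so every write stays readable.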
  private TableDescriptor getTableDesc(TableName tableName, int regionReplication,
      byte[]... families) {
    TableDescriptorBuilder builder =
      TableDescriptorBuilder.newBuilder(tableName).setRegionReplication(regionReplication);
    Arrays.stream(families).map(family -> ColumnFamilyDescriptorBuilder.newBuilder(family)
      .setMaxVersions(Integer.MAX_VALUE).build()).forEachOrdered(builder::setColumnFamily);
    return builder.build();
  }

  // Region file system whose store file listing can be made to throw, simulating a
  // filesystem failure during a store file refresh.
  static class FailingHRegionFileSystem extends HRegionFileSystem {
    boolean fail = false;

    FailingHRegionFileSystem(Configuration conf, FileSystem fs, Path tableDir,
        RegionInfo regionInfo) {
      super(conf, fs, tableDir, regionInfo);
    }

    @Override
    public Collection<StoreFileInfo> getStoreFiles(String familyName) throws IOException {
      if (fail) {
        throw new IOException("simulating FS failure");
      }
      return super.getStoreFiles(familyName);
    }
  }

  // Opens a region (primary or replica, depending on replicaId) backed by a
  // FailingHRegionFileSystem so the test can inject refresh failures later.
  private HRegion initHRegion(TableDescriptor htd, byte[] startKey, byte[] stopKey, int replicaId)
      throws IOException {
    Configuration conf = TEST_UTIL.getConfiguration();
    Path tableDir = CommonFSUtils.getTableDir(testDir, htd.getTableName());

    RegionInfo info = RegionInfoBuilder.newBuilder(htd.getTableName()).setStartKey(startKey)
      .setEndKey(stopKey).setRegionId(0L).setReplicaId(replicaId).build();
    HRegionFileSystem fs =
      new FailingHRegionFileSystem(conf, tableDir.getFileSystem(conf), tableDir, info);
    final Configuration walConf = new Configuration(conf);
    CommonFSUtils.setRootDir(walConf, tableDir);
    final WALFactory wals = new WALFactory(walConf, "log_" + replicaId);
    ChunkCreator.initialize(MemStoreLAB.CHUNK_SIZE_DEFAULT, false, 0, 0, 0, null,
      MemStoreLAB.INDEX_CHUNK_SIZE_PERCENTAGE_DEFAULT);
    HRegion region = new HRegion(fs, wals.getWAL(info), conf, htd, null);

    region.initialize();

    return region;
  }

  private void putData(Region region, int startRow, int numRows, byte[] qf, byte[]... families)
      throws IOException {
    for (int i = startRow; i < startRow + numRows; i++) {
      Put put = new Put(Bytes.toBytes("" + i));
      put.setDurability(Durability.SKIP_WAL);
      for (byte[] family : families) {
        put.addColumn(family, qf, null);
      }
      region.put(put);
    }
  }

  private void verifyDataExpectFail(Region newReg, int startRow, int numRows, byte[] qf,
      byte[]... families) throws IOException {
    boolean threw = false;
    try {
      verifyData(newReg, startRow, numRows, qf, families);
    } catch (AssertionError e) {
      threw = true;
    }
    if (!threw) {
      fail("Expected data verification to fail");
    }
  }
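  // Reads rows [startRow, startRow + numRows) back through the region and asserts that
  // exactly one cell per family comes back with the expected row, family, and qualifier.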
  private void verifyData(Region newReg, int startRow, int numRows, byte[] qf,
      byte[]... families) throws IOException {
    for (int i = startRow; i < startRow + numRows; i++) {
      byte[] row = Bytes.toBytes("" + i);
      Get get = new Get(row);
      for (byte[] family : families) {
        get.addColumn(family, qf);
      }
      Result result = newReg.get(get);
      Cell[] raw = result.rawCells();
      assertEquals(families.length, result.size());
      for (int j = 0; j < families.length; j++) {
        assertTrue(CellUtil.matchingRows(raw[j], row));
        assertTrue(CellUtil.matchingFamily(raw[j], families[j]));
        assertTrue(CellUtil.matchingQualifier(raw[j], qf));
      }
    }
  }

  // Chore subclass whose staleness check is controlled by the test instead of the clock.
  static class StaleStorefileRefresherChore extends StorefileRefresherChore {
    boolean isStale = false;

    public StaleStorefileRefresherChore(int period, HRegionServer regionServer,
        Stoppable stoppable) {
      super(period, false, regionServer, stoppable);
    }

    @Override
    protected boolean isRegionStale(String encodedName, long time) {
      return isStale;
    }
  }

  @Test
  public void testIsStale() throws IOException {
    int period = 0;
    byte[][] families = new byte[][] { Bytes.toBytes("cf") };
    byte[] qf = Bytes.toBytes("cq");

    HRegionServer regionServer = mock(HRegionServer.class);
    List<HRegion> regions = new ArrayList<>();
    when(regionServer.getOnlineRegionsLocalContext()).thenReturn(regions);
    when(regionServer.getConfiguration()).thenReturn(TEST_UTIL.getConfiguration());

    TableDescriptor htd = getTableDesc(TableName.valueOf(name.getMethodName()), 2, families);
    HRegion primary = initHRegion(htd, HConstants.EMPTY_START_ROW, HConstants.EMPTY_END_ROW, 0);
    HRegion replica1 = initHRegion(htd, HConstants.EMPTY_START_ROW, HConstants.EMPTY_END_ROW, 1);
    regions.add(primary);
    regions.add(replica1);

    StaleStorefileRefresherChore chore =
      new StaleStorefileRefresherChore(period, regionServer, new StoppableImplementation());

    // write some data to the primary and flush
    putData(primary, 0, 100, qf, families);
    primary.flush(true);
    verifyData(primary, 0, 100, qf, families);

    // the replica does not see the data until the chore refreshes its store files
    verifyDataExpectFail(replica1, 0, 100, qf, families);
    chore.chore();
    verifyData(replica1, 0, 100, qf, families);

    // simulate an FS failure where we cannot refresh the store files for the replica
    ((FailingHRegionFileSystem) replica1.getRegionFileSystem()).fail = true;

    // write some more data to the primary and flush
    putData(primary, 100, 100, qf, families);
    primary.flush(true);
    verifyData(primary, 0, 200, qf, families);

    chore.chore(); // should not throw, but the store files cannot be refreshed

    verifyData(replica1, 0, 100, qf, families);
    verifyDataExpectFail(replica1, 100, 100, qf, families);

    chore.isStale = true;
    chore.chore(); // now after this, we cannot read back any value
    try {
      verifyData(replica1, 0, 100, qf, families);
      fail("should have failed with IOException");
    } catch (IOException ex) {
      // expected
    }
  }
}