/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.regionserver;

import static org.junit.Assert.fail;

import java.io.IOException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import java.util.concurrent.CountDownLatch;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseClassTestRule;
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Increment;
import org.apache.hadoop.hbase.client.Mutation;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.RegionInfo;
import org.apache.hadoop.hbase.client.RegionInfoBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL;
import org.apache.hadoop.hbase.testclassification.RegionServerTests;
import org.apache.hadoop.hbase.testclassification.SmallTests;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.CommonFSUtils;
import org.apache.hadoop.hbase.wal.WAL;
import org.apache.hadoop.hbase.wal.WALEdit;
import org.apache.hadoop.hbase.wal.WALFactory;
import org.junit.After;
import org.junit.AfterClass;
import org.junit.Before;
import org.junit.ClassRule;
import org.junit.Rule;
import org.junit.Test;
import org.junit.experimental.categories.Category;
import org.junit.rules.TestName;
import org.junit.runner.RunWith;
import org.junit.runners.Parameterized;
import org.junit.runners.Parameterized.Parameter;
import org.junit.runners.Parameterized.Parameters;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

/**
 * Test for HBASE-17471.
 * <p>
 * MVCCPreAssign was added by HBASE-16698, but pre-assigned mvcc is only used in the put/delete
 * path. Other write paths like increment/append still assign mvcc in the ringbuffer's consumer
 * thread. If put and increment run in parallel, the seqids in the WAL may not increase
 * monotonically, and such disorder in the WALs will lead to data loss.
 * <p>
 * This case uses two threads to put and increment at the same time in a single region, then
 * checks the seqids in the WAL. If the seqids in the WAL are not monotonically increasing, this
 * case fails.
 */
@RunWith(Parameterized.class)
@Category({ RegionServerTests.class, SmallTests.class })
public class TestWALMonotonicallyIncreasingSeqId {

  @ClassRule
  public static final HBaseClassTestRule CLASS_RULE =
    HBaseClassTestRule.forClass(TestWALMonotonicallyIncreasingSeqId.class);

  private final Logger LOG = LoggerFactory.getLogger(getClass());
  private final static HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
  private static Path testDir = TEST_UTIL.getDataTestDir("TestWALMonotonicallyIncreasingSeqId");
  private WALFactory wals;
  private FileSystem fileSystem;
  private Configuration walConf;
  private HRegion region;

  @Parameter
  public String walProvider;

  @Rule
  public TestName name = new TestName();

  @Parameters(name = "{index}: wal={0}")
  public static List<Object[]> data() {
    return Arrays.asList(new Object[] { "asyncfs" }, new Object[] { "filesystem" });
  }

  private TableDescriptor getTableDesc(TableName tableName, byte[]... families) {
    TableDescriptorBuilder builder = TableDescriptorBuilder.newBuilder(tableName);
    Arrays.stream(families)
      .map(
        f -> ColumnFamilyDescriptorBuilder.newBuilder(f).setMaxVersions(Integer.MAX_VALUE).build())
      .forEachOrdered(builder::setColumnFamily);
    return builder.build();
  }

  private HRegion initHRegion(TableDescriptor htd, byte[] startKey, byte[] stopKey, int replicaId)
    throws IOException {
    Configuration conf = TEST_UTIL.getConfiguration();
    conf.set("hbase.wal.provider", walProvider);
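    // Run with mvcc pre-assign (the HBASE-16698 feature discussed in the class javadoc)
    // explicitly disabled; see the class javadoc for how pre-assign interacts with the
    // increment write path in HBASE-17471.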
    conf.setBoolean("hbase.hregion.mvcc.preassign", false);
    Path tableDir = CommonFSUtils.getTableDir(testDir, htd.getTableName());

    RegionInfo info = RegionInfoBuilder.newBuilder(htd.getTableName()).setStartKey(startKey)
      .setEndKey(stopKey).setReplicaId(replicaId).setRegionId(0).build();
    fileSystem = tableDir.getFileSystem(conf);
    final Configuration walConf = new Configuration(conf);
    CommonFSUtils.setRootDir(walConf, tableDir);
    this.walConf = walConf;
    wals = new WALFactory(walConf, "log_" + replicaId);
    ChunkCreator.initialize(MemStoreLAB.CHUNK_SIZE_DEFAULT, false, 0, 0, 0, null,
      MemStoreLAB.INDEX_CHUNK_SIZE_PERCENTAGE_DEFAULT);
    HRegion region =
      HRegion.createHRegion(info, TEST_UTIL.getDefaultRootDirPath(), conf, htd, wals.getWAL(info));
    return region;
  }

  // Released by the increment thread after its first increment, so puts only start once
  // increments are flowing and the two write paths interleave.
  CountDownLatch latch = new CountDownLatch(1);

  public class PutThread extends Thread {
    HRegion region;

    public PutThread(HRegion region) {
      this.region = region;
    }

    @Override
    public void run() {
      try {
        for (int i = 0; i < 100; i++) {
          byte[] row = Bytes.toBytes("putRow" + i);
          Put put = new Put(row);
          put.addColumn(Bytes.toBytes("cf"), Bytes.toBytes(0), Bytes.toBytes(""));
          latch.await();
          region.batchMutate(new Mutation[] { put });
          Thread.sleep(10);
        }
      } catch (Throwable t) {
        LOG.warn("Error happened when Put: ", t);
      }
    }
  }

  public class IncThread extends Thread {
    HRegion region;

    public IncThread(HRegion region) {
      this.region = region;
    }

    @Override
    public void run() {
      try {
        for (int i = 0; i < 100; i++) {
          byte[] row = Bytes.toBytes("incrementRow" + i);
          Increment inc = new Increment(row);
          inc.addColumn(Bytes.toBytes("cf"), Bytes.toBytes(0), 1);
          // inc.setDurability(Durability.ASYNC_WAL);
          region.increment(inc);
          latch.countDown();
          Thread.sleep(10);
        }
      } catch (Throwable t) {
        LOG.warn("Error happened when Increment: ", t);
      }
    }
  }

  @Before
  public void setUp() throws IOException {
    byte[][] families = new byte[][] { Bytes.toBytes("cf") };
    TableDescriptor htd = getTableDesc(
      TableName.valueOf(name.getMethodName().replaceAll("[^0-9A-Za-z_]", "_")), families);
    region = initHRegion(htd, HConstants.EMPTY_START_ROW, HConstants.EMPTY_END_ROW, 0);
  }

  @After
  public void tearDown() throws IOException {
    if (region != null) {
      region.close();
    }
  }

  @AfterClass
  public static void tearDownAfterClass() throws IOException {
    TEST_UTIL.cleanupTestDir();
  }

  private WAL.Reader createReader(Path logPath, Path oldWalsDir) throws IOException {
    try {
      return wals.createReader(fileSystem, logPath);
    } catch (IOException e) {
      // The roll in the test may have already archived the log, so fall back to the old WALs dir.
      return wals.createReader(fileSystem, new Path(oldWalsDir, logPath.getName()));
    }
  }
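  /**
   * Runs a put thread and an increment thread concurrently against the same region, then rolls
   * the WAL and reads the finished log back, asserting that the seqid of every non-meta edit is
   * strictly greater than the one before it.
   */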
  @Test
  public void testWALMonotonicallyIncreasingSeqId() throws Exception {
    List<Thread> putThreads = new ArrayList<>();
    for (int i = 0; i < 1; i++) {
      putThreads.add(new PutThread(region));
    }
    IncThread incThread = new IncThread(region);
    for (int i = 0; i < 1; i++) {
      putThreads.get(i).start();
    }
    incThread.start();
    incThread.join();

    Path logPath = ((AbstractFSWAL<?>) region.getWAL()).getCurrentFileName();
    region.getWAL().rollWriter();
    Thread.sleep(10);
    Path hbaseDir = new Path(walConf.get(HConstants.HBASE_DIR));
    Path oldWalsDir = new Path(hbaseDir, HConstants.HREGION_OLDLOGDIR_NAME);
    try (WAL.Reader reader = createReader(logPath, oldWalsDir)) {
      long currentMaxSeqid = 0;
      for (WAL.Entry e; (e = reader.next()) != null;) {
        if (!WALEdit.isMetaEditFamily(e.getEdit().getCells().get(0))) {
          long currentSeqid = e.getKey().getSequenceId();
          if (currentSeqid > currentMaxSeqid) {
            currentMaxSeqid = currentSeqid;
          } else {
            fail("Current max Seqid is " + currentMaxSeqid
              + ", but the next seqid in wal is smaller: " + currentSeqid);
          }
        }
      }
    }
  }
}