/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.regionserver;

import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;

import java.io.IOException;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.CellComparatorImpl;
import org.apache.hadoop.hbase.HBaseClassTestRule;
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.MemoryCompactionPolicy;
import org.apache.hadoop.hbase.PrivateCellUtil;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.RegionInfo;
import org.apache.hadoop.hbase.client.RegionInfoBuilder;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.io.hfile.BlockCache;
import org.apache.hadoop.hbase.io.hfile.BlockCacheFactory;
import org.apache.hadoop.hbase.testclassification.MediumTests;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.CommonFSUtils;
import org.apache.hadoop.hbase.util.FSUtils;
import org.apache.hadoop.hbase.wal.WAL;
import org.apache.hadoop.hbase.wal.WALEdit;
import org.apache.hadoop.hbase.wal.WALFactory;
import org.apache.hadoop.hbase.wal.WALKey;
import org.apache.hadoop.hbase.wal.WALSplitUtil;
import org.junit.BeforeClass;
import org.junit.ClassRule;
import org.junit.Rule;
import org.junit.Test;
import org.junit.experimental.categories.Category;
import org.junit.rules.TestName;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

/**
 * Tests around replay of recovered.edits content.
 */
@Category({MediumTests.class})
public class TestRecoveredEdits {

  @ClassRule
  public static final HBaseClassTestRule CLASS_RULE =
      HBaseClassTestRule.forClass(TestRecoveredEdits.class);

  private static final HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
  private static final Logger LOG = LoggerFactory.getLogger(TestRecoveredEdits.class);

  private static BlockCache blockCache;

  @Rule public TestName testName = new TestName();

  /**
   * Path to a recovered.edits file in the hbase-server test resources folder.
   * Deriving the path to this ~10MB file of edits is a little fragile.
   */
  @SuppressWarnings("checkstyle:VisibilityModifier")
  public static final Path RECOVEREDEDITS_PATH = new Path(
    System.getProperty("test.build.classes", "target/test-classes"),
    "0000000000000016310");

  /**
   * Name of table referenced by edits in the recovered.edits file.
   */
  public static final String RECOVEREDEDITS_TABLENAME = "IntegrationTestBigLinkedList";

  /**
   * Column family referenced by edits in the recovered.edits file.
   */
  public static final byte[] RECOVEREDEDITS_COLUMNFAMILY = Bytes.toBytes("meta");
  public static final byte[][] RECOVEREDITS_COLUMNFAMILY_ARRAY =
    new byte[][] {RECOVEREDEDITS_COLUMNFAMILY};
  public static final ColumnFamilyDescriptor RECOVEREDEDITS_CFD =
    ColumnFamilyDescriptorBuilder.newBuilder(RECOVEREDEDITS_COLUMNFAMILY).build();

  // Create the block cache used by regions created in this test.
  @BeforeClass
  public static void setUpBeforeClass() throws Exception {
    blockCache = BlockCacheFactory.createBlockCache(TEST_UTIL.getConfiguration());
  }

  /**
   * HBASE-12782 ITBLL fails if the generator does anything but 5M per map task.
   * Create a region and close it. Then copy into place a recovered.edits file to replay, one that
   * is bigger than the configured flush size so we bring on lots of flushes. Then reopen the
   * region and confirm all edits made it in.
   */
  @Test
  public void testReplayWorksThroughLotsOfFlushing() throws IOException {
    for (MemoryCompactionPolicy policy : MemoryCompactionPolicy.values()) {
      testReplayWorksWithMemoryCompactionPolicy(policy);
    }
  }

  private void testReplayWorksWithMemoryCompactionPolicy(MemoryCompactionPolicy policy)
      throws IOException {
    Configuration conf = new Configuration(TEST_UTIL.getConfiguration());
    // Set it so we flush every 1MB or so. That's a lot of flushing.
    conf.setInt(HConstants.HREGION_MEMSTORE_FLUSH_SIZE, 1024 * 1024);
    conf.set(CompactingMemStore.COMPACTING_MEMSTORE_TYPE_KEY,
      String.valueOf(policy).toLowerCase());
    TableDescriptor tableDescriptor = TableDescriptorBuilder
      .newBuilder(TableName.valueOf(testName.getMethodName()))
      .setColumnFamily(RECOVEREDEDITS_CFD).build();
    RegionInfo hri = RegionInfoBuilder.newBuilder(tableDescriptor.getTableName()).build();
    final String encodedRegionName = hri.getEncodedName();
    Path hbaseRootDir = TEST_UTIL.getDataTestDir();
    FileSystem fs = FileSystem.get(TEST_UTIL.getConfiguration());
    Path tableDir = CommonFSUtils.getTableDir(hbaseRootDir, tableDescriptor.getTableName());
    HRegionFileSystem hrfs =
        new HRegionFileSystem(TEST_UTIL.getConfiguration(), fs, tableDir, hri);
    if (fs.exists(hrfs.getRegionDir())) {
      LOG.info("Region directory already exists. Deleting.");
      fs.delete(hrfs.getRegionDir(), true);
    }
    HRegion region = HBaseTestingUtility
        .createRegionAndWAL(hri, hbaseRootDir, conf, tableDescriptor, blockCache);
    assertEquals(encodedRegionName, region.getRegionInfo().getEncodedName());
    List<String> storeFiles = region.getStoreFileList(RECOVEREDITS_COLUMNFAMILY_ARRAY);
    // There should be no store files.
    assertTrue(storeFiles.isEmpty());
    region.close();
    Path regionDir = FSUtils.getRegionDirFromRootDir(hbaseRootDir, hri);
    Path recoveredEditsDir = WALSplitUtil.getRegionDirRecoveredEditsDir(regionDir);
    // Copy this file under the region's recovered.edits dir so it is replayed on reopen.
    Path destination = new Path(recoveredEditsDir, RECOVEREDEDITS_PATH.getName());
    fs.copyToLocalFile(RECOVEREDEDITS_PATH, destination);
    assertTrue(fs.exists(destination));
    // Now the file 0000000000000016310 is under recovered.edits, reopen the region to replay.
    region = HRegion.openHRegion(region, null);
    assertEquals(encodedRegionName, region.getRegionInfo().getEncodedName());
    storeFiles = region.getStoreFileList(RECOVEREDITS_COLUMNFAMILY_ARRAY);
    // Our 0000000000000016310 file is ~10MB and most of its edits are for one region. Since we
    // flush at 1MB, the replay should leave behind a good number of flushed store files, unless
    // an in-memory compacting policy (EAGER or ADAPTIVE) has merged them down first.
    if (policy == MemoryCompactionPolicy.EAGER || policy == MemoryCompactionPolicy.ADAPTIVE) {
      assertTrue("Files count=" + storeFiles.size(), storeFiles.size() >= 1);
    } else {
      assertTrue("Files count=" + storeFiles.size(), storeFiles.size() > 10);
    }
    // Now verify all edits made it into the region.
    int count = verifyAllEditsMadeItIn(fs, conf, RECOVEREDEDITS_PATH, region);
    assertTrue(count > 0);
    LOG.info("Checked " + count + " edits made it in");
  }

  /**
   * @return Count of edits seen in the passed <code>edits</code> file.
   */
  // Used by TestWALPlayer over in hbase-mapreduce too.
  public static int verifyAllEditsMadeItIn(final FileSystem fs, final Configuration conf,
      final Path edits, final HRegion region) throws IOException {
    int count = 0;
    // Read all cells from the recovered edits file.
    List<Cell> walCells = new ArrayList<>();
    try (WAL.Reader reader = WALFactory.createReader(fs, edits, conf)) {
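      // Each entry pairs a WALKey (region/sequence info) with a WALEdit holding the cells.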
      WAL.Entry entry;
      while ((entry = reader.next()) != null) {
        WALKey key = entry.getKey();
        WALEdit val = entry.getEdit();
        count++;
        // Check this edit is for this region.
        if (!Bytes.equals(key.getEncodedRegionName(),
            region.getRegionInfo().getEncodedNameAsBytes())) {
          continue;
        }
        Cell previous = null;
        for (Cell cell : val.getCells()) {
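          // Skip WALEdit meta-family cells (markers such as compaction or flush events); they
          // are not table data.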
          if (WALEdit.isMetaEditFamily(cell)) {
            continue;
          }
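          // Skip consecutive cells that repeat the previous cell's row; only one cell per row
          // gets checked against the region contents below.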
          if (previous != null && CellComparatorImpl.COMPARATOR.compareRows(previous, cell) == 0) {
            continue;
          }
          previous = cell;
          walCells.add(cell);
        }
      }
    }

    // Read all cells from region
    List<Cell> regionCells = new ArrayList<>();
    try (RegionScanner scanner = region.getScanner(new Scan())) {
      List<Cell> tmpCells;
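      // nextRaw() fills tmpCells with the next batch of cells; an empty batch means the region
      // has been fully scanned.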
      do {
        tmpCells = new ArrayList<>();
        scanner.nextRaw(tmpCells);
        regionCells.addAll(tmpCells);
      } while (!tmpCells.isEmpty());
    }

    Collections.sort(walCells, CellComparatorImpl.COMPARATOR);
    int found = 0;
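    // Merge-walk the two sorted lists; every cell kept from the recovered edits must have a
    // matching cell (ignoring mvcc/sequenceid) in the region scan.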
    for (int i = 0, j = 0; i < walCells.size() && j < regionCells.size(); ) {
      int compareResult = PrivateCellUtil
          .compareKeyIgnoresMvcc(CellComparatorImpl.COMPARATOR, walCells.get(i),
              regionCells.get(j));
      if (compareResult == 0) {
        i++;
        j++;
        found++;
      } else if (compareResult > 0) {
        j++;
      } else {
        i++;
      }
    }
    assertEquals("Only found " + found + " cells in region, but there are " + walCells.size() +
        " cells in recovered edits", found, walCells.size());
    return count;
  }
}