/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.regionserver;

import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;

import java.io.IOException;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.CellComparatorImpl;
import org.apache.hadoop.hbase.HBaseClassTestRule;
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.MemoryCompactionPolicy;
import org.apache.hadoop.hbase.PrivateCellUtil;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.RegionInfo;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.io.hfile.BlockCache;
import org.apache.hadoop.hbase.io.hfile.BlockCacheFactory;
import org.apache.hadoop.hbase.testclassification.MediumTests;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.CommonFSUtils;
import org.apache.hadoop.hbase.util.FSUtils;
import org.apache.hadoop.hbase.wal.WAL;
import org.apache.hadoop.hbase.wal.WALEdit;
import org.apache.hadoop.hbase.wal.WALFactory;
import org.apache.hadoop.hbase.wal.WALKey;
import org.apache.hadoop.hbase.wal.WALSplitUtil;
import org.junit.BeforeClass;
import org.junit.ClassRule;
import org.junit.Rule;
import org.junit.Test;
import org.junit.experimental.categories.Category;
import org.junit.rules.TestName;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

/**
 * Tests around replay of recovered.edits content.
 */
@Category({MediumTests.class})
public class TestRecoveredEdits {

  @ClassRule
  public static final HBaseClassTestRule CLASS_RULE =
      HBaseClassTestRule.forClass(TestRecoveredEdits.class);

  private static final HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
  private static final Logger LOG = LoggerFactory.getLogger(TestRecoveredEdits.class);

  private static BlockCache blockCache;

  @Rule public TestName testName = new TestName();

  @BeforeClass
  public static void setUpBeforeClass() throws Exception {
    blockCache = BlockCacheFactory.createBlockCache(TEST_UTIL.getConfiguration());
  }
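
  // The block cache built above is shared across every per-policy run below: each region this
  // test creates is handed this cache via HBaseTestingUtility.createRegionAndWAL rather than
  // constructing its own.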

  /**
   * HBASE-12782 ITBLL fails if the generator does anything but 5M per maptask.
   * Create a region. Close it. Then copy into place a file to replay, one that is bigger than
   * the configured flush size so we bring on lots of flushes. Then reopen the region and confirm
   * all edits made it in.
   */
  @Test
  public void testReplayWorksThoughLotsOfFlushing() throws IOException {
    for (MemoryCompactionPolicy policy : MemoryCompactionPolicy.values()) {
      testReplayWorksWithMemoryCompactionPolicy(policy);
    }
  }

  private void testReplayWorksWithMemoryCompactionPolicy(MemoryCompactionPolicy policy)
      throws IOException {
    Configuration conf = new Configuration(TEST_UTIL.getConfiguration());
    // Set it so we flush every 1M or so. That's a lot of flushing.
    conf.setInt(HConstants.HREGION_MEMSTORE_FLUSH_SIZE, 1024 * 1024);
    conf.set(CompactingMemStore.COMPACTING_MEMSTORE_TYPE_KEY, String.valueOf(policy).toLowerCase());
    // The file of recovered edits has a column family of 'meta'. It also has an encoded region
    // name of 4823016d8fca70b25503ee07f4c6d79f which needs to match on replay.
    final String encodedRegionName = "4823016d8fca70b25503ee07f4c6d79f";
    final String columnFamily = "meta";
    byte[][] columnFamilyAsByteArray = new byte[][] { Bytes.toBytes(columnFamily) };
    TableDescriptor tableDescriptor =
        TableDescriptorBuilder.newBuilder(TableName.valueOf(testName.getMethodName()))
            .setColumnFamily(
                ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes(columnFamily)).build())
            .build();
    RegionInfo hri = new HRegionInfo(tableDescriptor.getTableName()) {
      @Override
      public synchronized String getEncodedName() {
        return encodedRegionName;
      }

      // Cache the encoded name as bytes because it is looked up a lot.
      private byte[] encodedRegionNameAsBytes = null;

      @Override
      public synchronized byte[] getEncodedNameAsBytes() {
        if (encodedRegionNameAsBytes == null) {
          this.encodedRegionNameAsBytes = Bytes.toBytes(getEncodedName());
        }
        return this.encodedRegionNameAsBytes;
      }
    };
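    // The anonymous subclass above pins the encoded region name to the one the recovered edits
    // file was written for; replay (and the verification below) matches edits to the region by
    // encoded name, so edits carrying a non-matching name would simply be skipped.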
    Path hbaseRootDir = TEST_UTIL.getDataTestDir();
    FileSystem fs = FileSystem.get(TEST_UTIL.getConfiguration());
    Path tableDir = CommonFSUtils.getTableDir(hbaseRootDir, tableDescriptor.getTableName());
    HRegionFileSystem hrfs =
        new HRegionFileSystem(TEST_UTIL.getConfiguration(), fs, tableDir, hri);
    if (fs.exists(hrfs.getRegionDir())) {
      LOG.info("Region directory already exists. Deleting.");
      fs.delete(hrfs.getRegionDir(), true);
    }
    HRegion region = HBaseTestingUtility
        .createRegionAndWAL(hri, hbaseRootDir, conf, tableDescriptor, blockCache);
    assertEquals(encodedRegionName, region.getRegionInfo().getEncodedName());
    List<String> storeFiles = region.getStoreFileList(columnFamilyAsByteArray);
    // There should be no store files.
    assertTrue(storeFiles.isEmpty());
    region.close();
    Path regionDir = FSUtils.getRegionDirFromRootDir(hbaseRootDir, hri);
    Path recoveredEditsDir = WALSplitUtil.getRegionDirRecoveredEditsDir(regionDir);
    // This is a little fragile: hand-building the path to a file that holds roughly 10MB of edits.
    Path recoveredEditsFile = new Path(
        System.getProperty("test.build.classes", "target/test-classes"),
        "0000000000000016310");
    // Copy this file under the region's recovered.edits dir so it is replayed on reopen.
    Path destination = new Path(recoveredEditsDir, recoveredEditsFile.getName());
    fs.copyToLocalFile(recoveredEditsFile, destination);
    assertTrue(fs.exists(destination));
    // Now the file 0000000000000016310 is under recovered.edits; reopen the region to replay.
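    // Reopening triggers replay of the recovered edits; with the flush size dropped to 1MB above,
    // replaying ~10MB of edits should force a series of memstore flushes along the way.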
    region = HRegion.openHRegion(region, null);
    assertEquals(encodedRegionName, region.getRegionInfo().getEncodedName());
    storeFiles = region.getStoreFileList(columnFamilyAsByteArray);
    // Our 0000000000000016310 file is 10MB and most of its edits are for this one region. Let's
    // assume that if we flush at 1MB, multiple flushed files show up purely because of the
    // replay of edits.
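    // Note: with in-memory compaction (EAGER/ADAPTIVE) the memstore compacts segments before they
    // are flushed, so far fewer store files may reach disk; hence the looser bound in that branch.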
    if (policy == MemoryCompactionPolicy.EAGER || policy == MemoryCompactionPolicy.ADAPTIVE) {
      assertTrue("Files count=" + storeFiles.size(), storeFiles.size() >= 1);
    } else {
      assertTrue("Files count=" + storeFiles.size(), storeFiles.size() > 10);
    }
    // Now verify all edits made it into the region.
    int count = verifyAllEditsMadeItIn(fs, conf, recoveredEditsFile, region);
    LOG.info("Checked " + count + " edits made it in");
  }

  /**
   * @param fs FileSystem the recovered edits file lives on.
   * @param conf Configuration used to read the recovered edits file.
   * @param edits Path to the file of recovered edits.
   * @param region Region whose contents we check against the recovered edits.
   * @return Count of WAL entries read from the edits file.
   * @throws IOException if reading the edits or scanning the region fails.
   */
  private int verifyAllEditsMadeItIn(final FileSystem fs, final Configuration conf,
      final Path edits, final HRegion region) throws IOException {
    int count = 0;
    // Read all cells from the recovered edits file.
    List<Cell> walCells = new ArrayList<>();
    try (WAL.Reader reader = WALFactory.createReader(fs, edits, conf)) {
      WAL.Entry entry;
      while ((entry = reader.next()) != null) {
        WALKey key = entry.getKey();
        WALEdit val = entry.getEdit();
        count++;
        // Check this edit is for this region.
        if (!Bytes.equals(key.getEncodedRegionName(),
            region.getRegionInfo().getEncodedNameAsBytes())) {
          continue;
        }
        Cell previous = null;
        for (Cell cell : val.getCells()) {
          if (WALEdit.isMetaEditFamily(cell)) {
            continue;
          }
          if (previous != null && CellComparatorImpl.COMPARATOR.compareRows(previous, cell) == 0) {
            continue;
          }
          previous = cell;
          walCells.add(cell);
        }
      }
    }
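    // walCells now holds, for this region only, the cells we expect to find in the region;
    // meta-family edits and repeated cells for the same row within an entry were skipped above.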

    // Read all cells from region
    List<Cell> regionCells = new ArrayList<>();
    try (RegionScanner scanner = region.getScanner(new Scan())) {
      List<Cell> tmpCells;
      do {
        tmpCells = new ArrayList<>();
        scanner.nextRaw(tmpCells);
        regionCells.addAll(tmpCells);
      } while (!tmpCells.isEmpty());
    }
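
    // regionCells comes back in scanner (comparator) order; sort walCells the same way, then walk
    // both lists with two cursors, counting the WAL cells whose key (ignoring mvcc/sequence id)
    // also shows up in the region.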
    Collections.sort(walCells, CellComparatorImpl.COMPARATOR);
    int found = 0;
    for (int i = 0, j = 0; i < walCells.size() && j < regionCells.size(); ) {
      int compareResult = PrivateCellUtil
          .compareKeyIgnoresMvcc(CellComparatorImpl.COMPARATOR, walCells.get(i),
              regionCells.get(j));
      if (compareResult == 0) {
        i++;
        j++;
        found++;
      } else if (compareResult > 0) {
        j++;
      } else {
        i++;
      }
    }
    assertEquals("Only found " + found + " cells in the region, but there are " + walCells.size() +
        " cells in the recovered edits", found, walCells.size());
    return count;
  }
}