/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.regionserver;

import static org.junit.Assert.assertEquals;

import java.io.FileNotFoundException;
import java.io.IOException;
import java.util.List;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseClassTestRule;
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.Waiter;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.CompactionState;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.testclassification.MediumTests;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.JVMClusterUtil.RegionServerThread;
import org.junit.After;
import org.junit.AfterClass;
import org.junit.Assert;
import org.junit.BeforeClass;
import org.junit.ClassRule;
import org.junit.Test;
import org.junit.experimental.categories.Category;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

/**
 * This class tests the scenario where a store refresh happens due to a file not found during a
 * scan, after a compaction has finished but before the compacted files are archived. In this
 * state we verify that a split and a further compaction still succeed.
 */
@Category(MediumTests.class)
public class TestCompactionFileNotFound {

  @ClassRule
  public static final HBaseClassTestRule CLASS_RULE =
      HBaseClassTestRule.forClass(TestCompactionFileNotFound.class);

  private static final Logger LOG = LoggerFactory.getLogger(TestCompactionFileNotFound.class);
  private static final HBaseTestingUtility util = new HBaseTestingUtility();

  private static final TableName TEST_TABLE = TableName.valueOf("test");
  private static final byte[] TEST_FAMILY = Bytes.toBytes("f1");

  private static final byte[] ROW_A = Bytes.toBytes("aaa");
  private static final byte[] ROW_B = Bytes.toBytes("bbb");
  private static final byte[] ROW_C = Bytes.toBytes("ccc");

  private static final byte[] qualifierCol1 = Bytes.toBytes("col1");

  private static final byte[] bytes1 = Bytes.toBytes(1);
  private static final byte[] bytes2 = Bytes.toBytes(2);
  private static final byte[] bytes3 = Bytes.toBytes(3);

  private Table table;

  @BeforeClass
  public static void setupBeforeClass() throws Exception {
    Configuration conf = util.getConfiguration();
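    // Push the compacted-files discharger interval out as far as possible so the chore does not
    // archive compacted store files on its own; the tests archive them explicitly when needed.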
    conf.setInt("hbase.hfile.compaction.discharger.interval",
      Integer.MAX_VALUE);
    util.startMiniCluster(3);
  }

  @AfterClass
  public static void tearDownAfterClass() throws Exception {
    util.shutdownMiniCluster();
  }

  @After
  public void after() throws Exception {
    try {
      if (table != null) {
        table.close();
      }
    } finally {
      util.deleteTable(TEST_TABLE);
    }
  }

  @Test
  public void testSplitAfterRefresh() throws Exception {
    Admin admin = util.getAdmin();
    table = util.createTable(TEST_TABLE, TEST_FAMILY);

    try {
      // Create multiple store files
      Put puta = new Put(ROW_A);
      puta.addColumn(TEST_FAMILY, qualifierCol1, bytes1);
      table.put(puta);
      admin.flush(TEST_TABLE);

      Put putb = new Put(ROW_B);
      putb.addColumn(TEST_FAMILY, qualifierCol1, bytes2);
      table.put(putb);
      admin.flush(TEST_TABLE);

      Put putc = new Put(ROW_C);
      putc.addColumn(TEST_FAMILY, qualifierCol1, bytes3);
      table.put(putc);
      admin.flush(TEST_TABLE);

      admin.compact(TEST_TABLE);
      while (admin.getCompactionState(TEST_TABLE) != CompactionState.NONE) {
        Thread.sleep(1000);
      }
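      // Re-apply one of the earlier edits so the memstore has data on top of the just-compacted
      // store before the store file refresh below.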
      table.put(putb);
      HRegion hr1 = (HRegion) util.getRSForFirstRegionInTable(TEST_TABLE)
          .getRegionByEncodedName(admin.getTableRegions(TEST_TABLE).get(0).getEncodedName());
      // Refresh store files post compaction; this should not open already compacted files
      hr1.refreshStoreFiles(true);
      int numRegionsBeforeSplit = admin.getTableRegions(TEST_TABLE).size();
      // Check if we can successfully split after compaction
      admin.splitRegion(admin.getTableRegions(TEST_TABLE).get(0).getEncodedNameAsBytes(), ROW_C);
      util.waitFor(20000, new Waiter.Predicate<Exception>() {
        @Override
        public boolean evaluate() throws Exception {
          int numRegionsAfterSplit = 0;
          List<RegionServerThread> rst = util.getMiniHBaseCluster().getLiveRegionServerThreads();
          for (RegionServerThread t : rst) {
            numRegionsAfterSplit += t.getRegionServer().getRegions(TEST_TABLE).size();
          }
          // Make sure that the split went through and all the regions are assigned
          return (numRegionsAfterSplit == numRegionsBeforeSplit + 1
              && admin.isTableAvailable(TEST_TABLE));
        }
      });
      // Split at this point should not result in the RS being aborted
      assertEquals(3, util.getMiniHBaseCluster().getLiveRegionServerThreads().size());
    } finally {
      if (admin != null) {
        admin.close();
      }
    }
  }

  @Test
  public void testCompactionAfterRefresh() throws Exception {
    Admin admin = util.getAdmin();
    table = util.createTable(TEST_TABLE, TEST_FAMILY);
    try {
      // Create multiple store files
      Put puta = new Put(ROW_A);
      puta.addColumn(TEST_FAMILY, qualifierCol1, bytes1);
      table.put(puta);
      admin.flush(TEST_TABLE);

      Put putb = new Put(ROW_B);
      putb.addColumn(TEST_FAMILY, qualifierCol1, bytes2);
      table.put(putb);
      admin.flush(TEST_TABLE);

      Put putc = new Put(ROW_C);
      putc.addColumn(TEST_FAMILY, qualifierCol1, bytes3);
      table.put(putc);
      admin.flush(TEST_TABLE);

      admin.compact(TEST_TABLE);
      while (admin.getCompactionState(TEST_TABLE) != CompactionState.NONE) {
        Thread.sleep(1000);
      }
      table.put(putb);
      HRegion hr1 = (HRegion) util.getRSForFirstRegionInTable(TEST_TABLE)
          .getRegionByEncodedName(admin.getTableRegions(TEST_TABLE).get(0).getEncodedName());
      // Refresh store files post compaction; this should not open already compacted files
      hr1.refreshStoreFiles(true);
      // Archive the store files and try another compaction to see if all is good
      for (HStore store : hr1.getStores()) {
        store.closeAndArchiveCompactedFiles();
      }
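      // The compacted files have now been archived; if the refresh above had wrongly re-opened
      // them, the compaction below would hit a FileNotFoundException.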
      try {
        hr1.compact(false);
      } catch (IOException e) {
        LOG.error("Got an exception during compaction", e);
        if (e instanceof FileNotFoundException) {
          Assert.fail("Got a FNFE during compaction");
        } else {
          Assert.fail("Got an unexpected IOException during compaction: " + e.getMessage());
        }
      }
    } finally {
      if (admin != null) {
        admin.close();
      }
    }
  }
}