/*
 *
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.regionserver;

import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;

import java.io.IOException;
import java.util.Collection;

import org.apache.hadoop.hbase.HBaseClassTestRule;
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Delete;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.HBaseAdmin;
import org.apache.hadoop.hbase.client.IsolationLevel;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner;
import org.apache.hadoop.hbase.regionserver.compactions.CompactionConfiguration;
import org.apache.hadoop.hbase.testclassification.MediumTests;
import org.apache.hadoop.hbase.util.Bytes;

import org.junit.AfterClass;
import org.junit.BeforeClass;
import org.junit.ClassRule;
import org.junit.Test;
import org.junit.experimental.categories.Category;

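/**
 * Test that compacted store files are archived when a region closes, and that
 * scanners which still reference those files fail cleanly once the files have
 * been removed.
 */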
@Category({MediumTests.class})
public class TestCleanupCompactedFileOnRegionClose {

  @ClassRule
  public static final HBaseClassTestRule CLASS_RULE =
      HBaseClassTestRule.forClass(TestCleanupCompactedFileOnRegionClose.class);

  private static HBaseTestingUtility util;

  @BeforeClass
  public static void beforeClass() throws Exception {
    util = new HBaseTestingUtility();
    // Raise the compaction threshold so only the explicit major compactions in the tests run.
    util.getConfiguration().setInt(CompactionConfiguration.HBASE_HSTORE_COMPACTION_MIN_KEY, 100);
    // Use a small HDFS block size (requires lowering the namenode's minimum block size).
    util.getConfiguration().set("dfs.blocksize", "64000");
    util.getConfiguration().set("dfs.namenode.fs-limits.min-block-size", "1024");
    // Let the HFile cleaner delete archived files immediately.
    util.getConfiguration().set(TimeToLiveHFileCleaner.TTL_CONF_KEY, "0");
    util.startMiniCluster(2);
  }

  @AfterClass
  public static void afterClass() throws Exception {
    util.shutdownMiniCluster();
  }
  @Test
  public void testCleanupOnClose() throws Exception {
    TableName tableName = TableName.valueOf("testCleanupOnClose");
    String familyName = "f";
    byte[] familyNameBytes = Bytes.toBytes(familyName);
    util.createTable(tableName, familyName);

    HBaseAdmin hBaseAdmin = util.getHBaseAdmin();
    Table table = util.getConnection().getTable(tableName);

    HRegionServer rs = util.getRSForFirstRegionInTable(tableName);
    Region region = rs.getRegions(tableName).get(0);

    // Write the same rows in several batches, flushing after each batch so that
    // every batch ends up in its own store file.
    int refSFCount = 4;
    for (int i = 0; i < refSFCount; i++) {
      for (int j = 0; j < refSFCount; j++) {
        Put put = new Put(Bytes.toBytes(j));
        put.addColumn(familyNameBytes, Bytes.toBytes(i), Bytes.toBytes(j));
        table.put(put);
      }
      util.flush(tableName);
    }
    assertEquals(refSFCount, region.getStoreFileList(new byte[][]{familyNameBytes}).size());

    // Add a delete to check whether we end up with an inconsistency after the region close
    Delete delete = new Delete(Bytes.toBytes(refSFCount - 1));
    table.delete(delete);
    util.flush(tableName);
    assertFalse(table.exists(new Get(Bytes.toBytes(refSFCount - 1))));

    // Create a scanner and keep it open to hold references to the StoreFileReaders
    Scan scan = new Scan();
    scan.setStopRow(Bytes.toBytes(refSFCount - 2));
    scan.setCaching(1);
    ResultScanner scanner = table.getScanner(scan);
    Result res = scanner.next();
    assertNotNull(res);
    assertEquals(refSFCount, res.getFamilyMap(familyNameBytes).size());
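
    // Reading one row touched all four data files (one column per flush), so
    // each of them should now carry a scanner reference; the delete-only file
    // should not.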
    // Verify the references
    int count = 0;
    for (HStoreFile sf : (Collection<HStoreFile>)region.getStore(familyNameBytes).getStorefiles()) {
      synchronized (sf) {
        if (count < refSFCount) {
          assertTrue(sf.isReferencedInReads());
        } else {
          assertFalse(sf.isReferencedInReads());
        }
      }
      count++;
    }

    // Major compact to produce compacted store files that need to be cleaned up
    util.compact(tableName, true);
    assertEquals(1, region.getStoreFileList(new byte[][]{familyNameBytes}).size());
    assertEquals(refSFCount + 1,
      ((HStore)region.getStore(familyNameBytes)).getStoreEngine().getStoreFileManager()
          .getCompactedfiles().size());

    // Close then open the region to determine whether compacted store files get cleaned up on close
    hBaseAdmin.unassign(region.getRegionInfo().getRegionName(), false);
    hBaseAdmin.assign(region.getRegionInfo().getRegionName());
    util.waitUntilNoRegionsInTransition(10000);
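
    // After the reopen, the deleted row must still be absent and the compacted
    // files must have been archived during the close.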
    assertFalse("Deleted row should not exist",
        table.exists(new Get(Bytes.toBytes(refSFCount - 1))));

    rs = util.getRSForFirstRegionInTable(tableName);
    region = rs.getRegions(tableName).get(0);
    assertEquals(1, region.getStoreFileList(new byte[][]{familyNameBytes}).size());
    assertEquals(0,
        ((HStore)region.getStore(familyNameBytes)).getStoreEngine().getStoreFileManager()
            .getCompactedfiles().size());
  }

  @Test
  public void testIOExceptionThrownOnClose() throws Exception {
    byte[] filler = new byte[128000];
    TableName tableName = TableName.valueOf("testIOExceptionThrownOnClose");
    String familyName = "f";
    byte[] familyNameBytes = Bytes.toBytes(familyName);
    util.createTable(tableName, familyName);

    Table table = util.getConnection().getTable(tableName);

    HRegionServer rs = util.getRSForFirstRegionInTable(tableName);
    Region region = rs.getRegions(tableName).get(0);

    // As above, produce one store file per flush; the large filler values make
    // the files span multiple HDFS blocks.
    int refSFCount = 4;
    for (int i = 0; i < refSFCount; i++) {
      for (int j = 0; j < refSFCount; j++) {
        Put put = new Put(Bytes.toBytes(j));
        put.addColumn(familyNameBytes, Bytes.toBytes(i), filler);
        table.put(put);
      }
      util.flush(tableName);
    }
    assertEquals(refSFCount, region.getStoreFileList(new byte[][]{familyNameBytes}).size());
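
    // Open one pread scanner and one stream scanner directly against a single
    // store file so we can watch its readers once the file is compacted away.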
    HStore store = ((HRegion) region).getStore(familyNameBytes);
    HStoreFile hsf = ((Collection<HStoreFile>)region.getStore(familyNameBytes).getStorefiles())
        .iterator().next();
    long readPt = ((HRegion)region).getReadPoint(IsolationLevel.READ_COMMITTED);
    StoreFileScanner preadScanner = hsf.getPreadScanner(false, readPt, 0, false);
    StoreFileScanner streamScanner =
        hsf.getStreamScanner(false, false, false, readPt, 0, false);
    preadScanner.seek(KeyValue.LOWESTKEY);
    streamScanner.seek(KeyValue.LOWESTKEY);

    // Major compact to produce compacted store files that need to be cleaned up
    util.compact(tableName, true);
    assertNotNull(preadScanner.next());
    assertNotNull(streamScanner.next());
    // Archive the compacted files while both scanners are still open.
    store.closeAndArchiveCompactedFiles(true);

    // The pread scanner uses the store file's shared reader, which is closed
    // when the compacted file is archived, so the next read should fail.
    try {
      assertNotNull(preadScanner.next());
      fail("Expected IOException");
    } catch (IOException ex) {
      ex.printStackTrace();
    }

    // The stream scanner reads through its own stream and should likewise fail
    // once the underlying file has been removed.
    try {
      assertNotNull(streamScanner.next());
      fail("Expected IOException");
    } catch (IOException ex) {
      ex.printStackTrace();
    }
  }
}