/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.master.janitor;

import static org.apache.hadoop.hbase.util.HFileArchiveTestingUtil.assertArchiveEqualToOriginal;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertTrue;
import static org.mockito.Mockito.doReturn;
import static org.mockito.Mockito.spy;

import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import java.util.NavigableMap;
import java.util.Objects;
import java.util.SortedMap;
import java.util.SortedSet;
import java.util.TreeMap;
import java.util.concurrent.ConcurrentSkipListMap;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseClassTestRule;
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.MetaMockingUtil;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.io.Reference;
import org.apache.hadoop.hbase.master.MasterServices;
import org.apache.hadoop.hbase.master.assignment.MockMasterServices;
import org.apache.hadoop.hbase.master.janitor.CatalogJanitor.SplitParentFirstComparator;
import org.apache.hadoop.hbase.procedure2.ProcedureTestingUtility;
import org.apache.hadoop.hbase.regionserver.ChunkCreator;
import org.apache.hadoop.hbase.regionserver.HStore;
import org.apache.hadoop.hbase.regionserver.MemStoreLAB;
import org.apache.hadoop.hbase.testclassification.MasterTests;
import org.apache.hadoop.hbase.testclassification.MediumTests;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.CommonFSUtils;
import org.apache.hadoop.hbase.util.HFileArchiveUtil;
import org.apache.zookeeper.KeeperException;
import org.junit.After;
import org.junit.Before;
import org.junit.BeforeClass;
import org.junit.ClassRule;
import org.junit.Rule;
import org.junit.Test;
import org.junit.experimental.categories.Category;
import org.junit.rules.TestName;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

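/**
 * Tests for {@link CatalogJanitor}: cleaning of split parents, archiving of a deleted region's
 * store files, and the ordering imposed by {@link SplitParentFirstComparator}.
 */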
@Category({MasterTests.class, MediumTests.class})
public class TestCatalogJanitor {

  @ClassRule
  public static final HBaseClassTestRule CLASS_RULE =
      HBaseClassTestRule.forClass(TestCatalogJanitor.class);

  private static final Logger LOG = LoggerFactory.getLogger(TestCatalogJanitor.class);

  private static final HBaseTestingUtility HTU = new HBaseTestingUtility();

  @Rule
  public final TestName name = new TestName();

  private MockMasterServices masterServices;
  private CatalogJanitor janitor;

  @BeforeClass
  public static void beforeClass() throws Exception {
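    // Set up the global ChunkCreator that backs MemStoreLAB allocation; memstores created while
    // these tests run allocate their cells from it.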
    ChunkCreator.initialize(MemStoreLAB.CHUNK_SIZE_DEFAULT, false, 0, 0, 0, null,
      MemStoreLAB.INDEX_CHUNK_SIZE_PERCENTAGE_DEFAULT);
  }

  @Before
  public void setup() throws IOException, KeeperException {
    setRootDirAndCleanIt(HTU, this.name.getMethodName());
    NavigableMap<ServerName, SortedSet<byte []>> regionsToRegionServers =
        new ConcurrentSkipListMap<ServerName, SortedSet<byte []>>();
    this.masterServices =
        new MockMasterServices(HTU.getConfiguration(), regionsToRegionServers);
    this.masterServices.start(10, null);
    this.janitor = new CatalogJanitor(masterServices);
  }

  @After
  public void teardown() {
    this.janitor.cancel(true);
    this.masterServices.stop("DONE");
  }

  /**
   * Test clearing a split parent.
   */
  @Test
  public void testCleanParent() throws IOException, InterruptedException {
    TableDescriptor td = createTableDescriptorForCurrentMethod();
    // Create regions.
    HRegionInfo parent =
        new HRegionInfo(td.getTableName(), Bytes.toBytes("aaa"), Bytes.toBytes("eee"));
    HRegionInfo splita =
        new HRegionInfo(td.getTableName(), Bytes.toBytes("aaa"), Bytes.toBytes("ccc"));
    HRegionInfo splitb =
        new HRegionInfo(td.getTableName(), Bytes.toBytes("ccc"), Bytes.toBytes("eee"));
    // Test that when both daughter regions are in place, we do not remove the parent.
    Result r = createResult(parent, splita, splitb);
    // Add a reference under the splita directory so we don't clear out the parent.
    Path rootdir = this.masterServices.getMasterFileSystem().getRootDir();
    Path tabledir = CommonFSUtils.getTableDir(rootdir, td.getTableName());
    Path parentdir = new Path(tabledir, parent.getEncodedName());
    Path storedir = HStore.getStoreHomedir(tabledir, splita, td.getColumnFamilies()[0].getName());
    Reference ref = Reference.createTopReference(Bytes.toBytes("ccc"));
    long now = System.currentTimeMillis();
    // Reference name has this format: StoreFile#REF_NAME_PARSER
    Path p = new Path(storedir, Long.toString(now) + "." + parent.getEncodedName());
    FileSystem fs = this.masterServices.getMasterFileSystem().getFileSystem();
    Path path = ref.write(fs, p);
    assertTrue(fs.exists(path));
    LOG.info("Created reference " + path);
    // Add a parentdir for kicks so we can check it gets removed by the CatalogJanitor.
    fs.mkdirs(parentdir);
    assertFalse(CatalogJanitor.cleanParent(masterServices, parent, r));
    ProcedureTestingUtility.waitAllProcedures(masterServices.getMasterProcedureExecutor());
    assertTrue(fs.exists(parentdir));
    // Remove the reference file and try again.
    assertTrue(fs.delete(p, true));
    assertTrue(CatalogJanitor.cleanParent(masterServices, parent, r));
    // Parent cleanup is run async as a procedure. Make sure parentdir is removed.
    ProcedureTestingUtility.waitAllProcedures(masterServices.getMasterProcedureExecutor());
    assertFalse(fs.exists(parentdir));
  }

  /**
   * Make sure parent gets cleaned up even if daughter is cleaned up before it.
   */
  @Test
  public void testParentCleanedEvenIfDaughterGoneFirst()
  throws IOException, InterruptedException {
    parentWithSpecifiedEndKeyCleanedEvenIfDaughterGoneFirst(this.name.getMethodName(),
        Bytes.toBytes("eee"));
  }

  /**
   * Make sure last parent with empty end key gets cleaned up even if daughter is cleaned up before it.
   */
  @Test
  public void testLastParentCleanedEvenIfDaughterGoneFirst()
  throws IOException, InterruptedException {
    parentWithSpecifiedEndKeyCleanedEvenIfDaughterGoneFirst(this.name.getMethodName(),
        new byte[0]);
  }

  /**
   * @return A TableDescriptor with a tableName of the current method name and a column
   * family named {@code MockMasterServices.DEFAULT_COLUMN_FAMILY_NAME}
   */
  private TableDescriptor createTableDescriptorForCurrentMethod() {
    return TableDescriptorBuilder.newBuilder(TableName.valueOf(this.name.getMethodName())).
      setColumnFamily(new HColumnDescriptor(MockMasterServices.DEFAULT_COLUMN_FAMILY_NAME)).
        build();
  }

  /**
   * Make sure parent with specified end key gets cleaned up even if daughter is cleaned up before it.
   *
   * @param rootDir the test case name, used as the HBase testing utility root
   * @param lastEndKey the end key of the split parent
   */
  private void parentWithSpecifiedEndKeyCleanedEvenIfDaughterGoneFirst(
  final String rootDir, final byte[] lastEndKey)
  throws IOException, InterruptedException {
    TableDescriptor td = createTableDescriptorForCurrentMethod();
    // Create regions: aaa->{lastEndKey}, aaa->ccc, aaa->bbb, bbb->ccc, etc.
    HRegionInfo parent = new HRegionInfo(td.getTableName(), Bytes.toBytes("aaa"), lastEndKey);
    // Sleep a second, else the encoded names of these regions come out the same because they
    // share a start key and are created in the same second.
    Thread.sleep(1001);

    // Daughter a
    HRegionInfo splita =
        new HRegionInfo(td.getTableName(), Bytes.toBytes("aaa"), Bytes.toBytes("ccc"));
    Thread.sleep(1001);
    // Make daughters of daughter a; splitaa and splitab.
    HRegionInfo splitaa =
        new HRegionInfo(td.getTableName(), Bytes.toBytes("aaa"), Bytes.toBytes("bbb"));
    HRegionInfo splitab =
        new HRegionInfo(td.getTableName(), Bytes.toBytes("bbb"), Bytes.toBytes("ccc"));

    // Daughter b
    HRegionInfo splitb =
        new HRegionInfo(td.getTableName(), Bytes.toBytes("ccc"), lastEndKey);
    Thread.sleep(1001);
    // Make daughters of daughter b; splitba and splitbb.
    HRegionInfo splitba =
        new HRegionInfo(td.getTableName(), Bytes.toBytes("ccc"), Bytes.toBytes("ddd"));
    HRegionInfo splitbb =
        new HRegionInfo(td.getTableName(), Bytes.toBytes("ddd"), lastEndKey);

    // First test that our Comparator works right up in CatalogJanitor.
    SortedMap<HRegionInfo, Result> regions =
        new TreeMap<>(new CatalogJanitor.SplitParentFirstComparator());
    // Now make sure that this regions map sorts as we expect it to.
    regions.put(parent, createResult(parent, splita, splitb));
    regions.put(splitb, createResult(splitb, splitba, splitbb));
    regions.put(splita, createResult(splita, splitaa, splitab));
    // Assert it's properly sorted: parent first, then splita, then splitb.
    int index = 0;
    for (Map.Entry<HRegionInfo, Result> e: regions.entrySet()) {
      if (index == 0) {
        assertTrue(e.getKey().getEncodedName().equals(parent.getEncodedName()));
      } else if (index == 1) {
        assertTrue(e.getKey().getEncodedName().equals(splita.getEncodedName()));
      } else if (index == 2) {
        assertTrue(e.getKey().getEncodedName().equals(splitb.getEncodedName()));
      }
      index++;
    }

    // Now play around with the cleanParent function. Create a ref from splita up to the parent.
    Path splitaRef =
        createReferences(this.masterServices, td, parent, splita, Bytes.toBytes("ccc"), false);
    // Make sure actual super parent sticks around because splita has a ref.
    assertFalse(CatalogJanitor.cleanParent(masterServices, parent, regions.get(parent)));

    // splitba and splitbb do not have dirs in fs. That means that if
    // we test splitb, it should get cleaned up.
    assertTrue(CatalogJanitor.cleanParent(masterServices, splitb, regions.get(splitb)));

    // Now remove ref from splita to parent... so parent can be let go and so
    // the daughter splita can be split (can't split if it still has references).
    // BUT make the timing such that the daughter gets cleaned up before we
    // can get a chance to let go of the parent.
    FileSystem fs = FileSystem.get(HTU.getConfiguration());
    assertTrue(fs.delete(splitaRef, true));
    // Create the refs from daughters of splita.
    Path splitaaRef =
      createReferences(this.masterServices, td, splita, splitaa, Bytes.toBytes("bbb"), false);
    Path splitabRef =
      createReferences(this.masterServices, td, splita, splitab, Bytes.toBytes("bbb"), true);

    // Test splita. It should stick around because of the references from splitaa and splitab.
    assertFalse(CatalogJanitor.cleanParent(masterServices, splita, regions.get(splita)));

    // Now clean up the daughter splita first. Remove the references from its daughters.
    assertTrue(fs.delete(splitaaRef, true));
    assertTrue(fs.delete(splitabRef, true));
    assertTrue(CatalogJanitor.cleanParent(masterServices, splita, regions.get(splita)));

    // Super parent should get cleaned up now that both splita and splitb are gone.
    assertTrue(CatalogJanitor.cleanParent(masterServices, parent, regions.get(parent)));
  }

  /**
   * CatalogJanitor.scan() should not clean parent regions if their own
   * parents are still referencing them. This ensures that grandparent regions
   * do not point to deleted parent regions.
   */
  @Test
  public void testScanDoesNotCleanRegionsWithExistingParents() throws Exception {
    TableDescriptor td = createTableDescriptorForCurrentMethod();
    // Create regions: aaa->(empty end key), aaa->ccc, aaa->bbb, bbb->ccc, etc.

    // Parent
    HRegionInfo parent = new HRegionInfo(td.getTableName(), Bytes.toBytes("aaa"),
            HConstants.EMPTY_BYTE_ARRAY, true);
    // Sleep a second, else the encoded names of these regions come out the same because they
    // share a start key and are created in the same second.
    Thread.sleep(1001);

    // Daughter a
    HRegionInfo splita =
        new HRegionInfo(td.getTableName(), Bytes.toBytes("aaa"), Bytes.toBytes("ccc"), true);
    Thread.sleep(1001);

    // Make daughters of daughter a; splitaa and splitab.
    HRegionInfo splitaa =
        new HRegionInfo(td.getTableName(), Bytes.toBytes("aaa"), Bytes.toBytes("bbb"), false);
    HRegionInfo splitab =
        new HRegionInfo(td.getTableName(), Bytes.toBytes("bbb"), Bytes.toBytes("ccc"), false);

    // Daughter b
    HRegionInfo splitb =
        new HRegionInfo(td.getTableName(), Bytes.toBytes("ccc"), HConstants.EMPTY_BYTE_ARRAY);
    Thread.sleep(1001);

    // Parent has daughters splita and splitb. Splita has daughters splitaa and splitab.
    final Map<HRegionInfo, Result> splitParents = new TreeMap<>(new SplitParentFirstComparator());
    splitParents.put(parent, createResult(parent, splita, splitb));
    splita.setOffline(true); // simulate that splita goes offline when it is split
    splitParents.put(splita, createResult(splita, splitaa, splitab));

    final Map<HRegionInfo, Result> mergedRegions = new TreeMap<>();
    CatalogJanitor spy = spy(this.janitor);

    Report report = new Report();
    report.count = 10;
    report.mergedRegions.putAll(mergedRegions);
    report.splitParents.putAll(splitParents);

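    // Stub scanForReport() so scan() works off our hand-built report instead of reading
    // hbase:meta.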
    doReturn(report).when(spy).scanForReport();

    // Create ref from splita to parent
    LOG.info("parent=" + parent.getShortNameToLog() + ", splita=" + splita.getShortNameToLog());
    Path splitaRef =
        createReferences(this.masterServices, td, parent, splita, Bytes.toBytes("ccc"), false);
    LOG.info("Created reference " + splitaRef);

    // Parent and splita should not be removed because of a reference from splita to parent.
    int gcs = spy.scan();
    assertEquals(0, gcs);

    // Now delete the ref
    FileSystem fs = FileSystem.get(HTU.getConfiguration());
    assertTrue(fs.delete(splitaRef, true));

    // Now both parent and splita can be deleted.
    gcs = spy.scan();
    assertEquals(2, gcs);
  }

  /**
   * Test the sort order imposed by {@link SplitParentFirstComparator}: a split parent always
   * sorts before its daughter regions.
   */
  @Test
  public void testSplitParentFirstComparator() {
    SplitParentFirstComparator comp = new SplitParentFirstComparator();
    TableDescriptor td = createTableDescriptorForCurrentMethod();

    /*  Region splits:
     *
     *  rootRegion --- firstRegion --- firstRegiona
     *              |               |- firstRegionb
     *              |
     *              |- lastRegion --- lastRegiona  --- lastRegionaa
     *                             |                |- lastRegionab
     *                             |- lastRegionb
     *
     *  rootRegion   :   []  - []
     *  firstRegion  :   []  - bbb
     *  lastRegion   :   bbb - []
     *  firstRegiona :   []  - aaa
     *  firstRegionb :   aaa - bbb
     *  lastRegiona  :   bbb - ddd
     *  lastRegionb  :   ddd - []
     */

    // root region
    HRegionInfo rootRegion = new HRegionInfo(td.getTableName(),
      HConstants.EMPTY_START_ROW, HConstants.EMPTY_END_ROW, true);
    HRegionInfo firstRegion = new HRegionInfo(td.getTableName(),
      HConstants.EMPTY_START_ROW, Bytes.toBytes("bbb"), true);
    HRegionInfo lastRegion = new HRegionInfo(td.getTableName(),
      Bytes.toBytes("bbb"), HConstants.EMPTY_END_ROW, true);

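    // As the asserts below check: regions order by start key; for equal start keys the wider
    // region (larger end key, with an empty end key meaning end-of-table) sorts first, so a
    // parent precedes its daughters.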
    assertTrue(comp.compare(rootRegion, rootRegion) == 0);
    assertTrue(comp.compare(firstRegion, firstRegion) == 0);
    assertTrue(comp.compare(lastRegion, lastRegion) == 0);
    assertTrue(comp.compare(rootRegion, firstRegion) < 0);
    assertTrue(comp.compare(rootRegion, lastRegion) < 0);
    assertTrue(comp.compare(firstRegion, lastRegion) < 0);

    // first region split into a, b
    HRegionInfo firstRegiona = new HRegionInfo(td.getTableName(),
      HConstants.EMPTY_START_ROW, Bytes.toBytes("aaa"), true);
    HRegionInfo firstRegionb = new HRegionInfo(td.getTableName(),
      Bytes.toBytes("aaa"), Bytes.toBytes("bbb"), true);
    // last region split into a, b
    HRegionInfo lastRegiona = new HRegionInfo(td.getTableName(),
      Bytes.toBytes("bbb"), Bytes.toBytes("ddd"), true);
    HRegionInfo lastRegionb = new HRegionInfo(td.getTableName(),
      Bytes.toBytes("ddd"), HConstants.EMPTY_END_ROW, true);

    assertTrue(comp.compare(firstRegiona, firstRegiona) == 0);
    assertTrue(comp.compare(firstRegionb, firstRegionb) == 0);
    assertTrue(comp.compare(rootRegion, firstRegiona) < 0);
    assertTrue(comp.compare(rootRegion, firstRegionb) < 0);
    assertTrue(comp.compare(firstRegion, firstRegiona) < 0);
    assertTrue(comp.compare(firstRegion, firstRegionb) < 0);
    assertTrue(comp.compare(firstRegiona, firstRegionb) < 0);

    assertTrue(comp.compare(lastRegiona, lastRegiona) == 0);
    assertTrue(comp.compare(lastRegionb, lastRegionb) == 0);
    assertTrue(comp.compare(rootRegion, lastRegiona) < 0);
    assertTrue(comp.compare(rootRegion, lastRegionb) < 0);
    assertTrue(comp.compare(lastRegion, lastRegiona) < 0);
    assertTrue(comp.compare(lastRegion, lastRegionb) < 0);
    assertTrue(comp.compare(lastRegiona, lastRegionb) < 0);

    assertTrue(comp.compare(firstRegiona, lastRegiona) < 0);
    assertTrue(comp.compare(firstRegiona, lastRegionb) < 0);
    assertTrue(comp.compare(firstRegionb, lastRegiona) < 0);
    assertTrue(comp.compare(firstRegionb, lastRegionb) < 0);

    HRegionInfo lastRegionaa = new HRegionInfo(td.getTableName(),
      Bytes.toBytes("bbb"), Bytes.toBytes("ccc"), false);
    HRegionInfo lastRegionab = new HRegionInfo(td.getTableName(),
      Bytes.toBytes("ccc"), Bytes.toBytes("ddd"), false);

    assertTrue(comp.compare(lastRegiona, lastRegionaa) < 0);
    assertTrue(comp.compare(lastRegiona, lastRegionab) < 0);
    assertTrue(comp.compare(lastRegionaa, lastRegionab) < 0);
  }

  @Test
  public void testArchiveOldRegion() throws Exception {
    // Create regions.
    TableDescriptor td = createTableDescriptorForCurrentMethod();
    HRegionInfo parent = new HRegionInfo(td.getTableName(),
        Bytes.toBytes("aaa"), Bytes.toBytes("eee"));
    HRegionInfo splita = new HRegionInfo(td.getTableName(),
        Bytes.toBytes("aaa"), Bytes.toBytes("ccc"));
    HRegionInfo splitb = new HRegionInfo(td.getTableName(),
        Bytes.toBytes("ccc"), Bytes.toBytes("eee"));

    // Test that when both daughter regions are in place, we do not
    // remove the parent.
    Result parentMetaRow = createResult(parent, splita, splitb);
    FileSystem fs = FileSystem.get(HTU.getConfiguration());
    Path rootdir = this.masterServices.getMasterFileSystem().getRootDir();
    // Have to set the root directory since we use it in HFileDisposer to figure out how to get to
    // the archive directory. Otherwise, it just seems to pick the first root directory it can find
    // (so the single test passes, but when the full suite is run, things get borked).
    CommonFSUtils.setRootDir(fs.getConf(), rootdir);
    Path tabledir = CommonFSUtils.getTableDir(rootdir, td.getTableName());
    Path storedir = HStore.getStoreHomedir(tabledir, parent, td.getColumnFamilies()[0].getName());
    Path storeArchive =
        HFileArchiveUtil.getStoreArchivePath(this.masterServices.getConfiguration(), parent,
            tabledir, td.getColumnFamilies()[0].getName());
    LOG.debug("Table dir:" + tabledir);
    LOG.debug("Store dir:" + storedir);
    LOG.debug("Store archive dir:" + storeArchive);

    // add a couple of store files that we can check for
    FileStatus[] mockFiles = addMockStoreFiles(2, this.masterServices, storedir);
    // get the current store files for comparison
    FileStatus[] storeFiles = fs.listStatus(storedir);
    int index = 0;
    for (FileStatus file : storeFiles) {
      LOG.debug("Have store file:" + file.getPath());
      assertEquals("Got unexpected store file", mockFiles[index].getPath(),
        storeFiles[index].getPath());
      index++;
    }

    // do the cleaning of the parent
    assertTrue(CatalogJanitor.cleanParent(masterServices, parent, parentMetaRow));
    Path parentDir = new Path(tabledir, parent.getEncodedName());
    // Cleanup procedure runs async. Wait till it is done.
    ProcedureTestingUtility.waitAllProcedures(masterServices.getMasterProcedureExecutor());
    assertFalse(fs.exists(parentDir));
    LOG.debug("Finished cleanup of parent region");

    // and now check to make sure that the files have actually been archived
    FileStatus[] archivedStoreFiles = fs.listStatus(storeArchive);
    logFiles("original files", storeFiles);
    logFiles("archived files", archivedStoreFiles);

    assertArchiveEqualToOriginal(storeFiles, archivedStoreFiles, fs);

    // cleanup
    CommonFSUtils.delete(fs, rootdir, true);
  }

  /**
   * @param description description of the files for logging
   * @param storeFiles the status of the files to log
   */
  private void logFiles(String description, FileStatus[] storeFiles) {
    LOG.debug("Current " + description + ": ");
    for (FileStatus file : storeFiles) {
      LOG.debug(Objects.toString(file.getPath()));
    }
  }

  /**
   * Test that archiving a store file with the same name as a file already in the archive causes
   * the previously archived file to be moved aside to a timestamped backup.
   */
  @Test
  public void testDuplicateHFileResolution() throws Exception {
    TableDescriptor td = createTableDescriptorForCurrentMethod();

    // Create regions.
    HRegionInfo parent = new HRegionInfo(td.getTableName(),
        Bytes.toBytes("aaa"), Bytes.toBytes("eee"));
    HRegionInfo splita = new HRegionInfo(td.getTableName(),
        Bytes.toBytes("aaa"), Bytes.toBytes("ccc"));
    HRegionInfo splitb = new HRegionInfo(td.getTableName(),
        Bytes.toBytes("ccc"), Bytes.toBytes("eee"));
    // Test that when both daughter regions are in place, we do not
    // remove the parent.
    Result r = createResult(parent, splita, splitb);
    FileSystem fs = FileSystem.get(HTU.getConfiguration());
    Path rootdir = this.masterServices.getMasterFileSystem().getRootDir();
    // Have to set the root directory since we use it in HFileDisposer to figure out how to get to
    // the archive directory. Otherwise, it just seems to pick the first root directory it can find
    // (so the single test passes, but when the full suite is run, things get borked).
    CommonFSUtils.setRootDir(fs.getConf(), rootdir);
    Path tabledir = CommonFSUtils.getTableDir(rootdir, parent.getTable());
    Path storedir = HStore.getStoreHomedir(tabledir, parent, td.getColumnFamilies()[0].getName());
    LOG.info("Old root:" + rootdir);
    LOG.info("Old table:" + tabledir);
    LOG.info("Old store:" + storedir);

    Path storeArchive = HFileArchiveUtil.getStoreArchivePath(this.masterServices.getConfiguration(),
      parent, tabledir, td.getColumnFamilies()[0].getName());
    LOG.info("Old archive:" + storeArchive);

    // enable archiving, make sure that files get archived
    addMockStoreFiles(2, this.masterServices, storedir);
    // get the current store files for comparison
    FileStatus[] storeFiles = fs.listStatus(storedir);
    // Do the cleaning of the parent
    assertTrue(CatalogJanitor.cleanParent(masterServices, parent, r));
    Path parentDir = new Path(tabledir, parent.getEncodedName());
    ProcedureTestingUtility.waitAllProcedures(masterServices.getMasterProcedureExecutor());
    assertFalse(fs.exists(parentDir));

    // And now check to make sure that the files have actually been archived
    FileStatus[] archivedStoreFiles = fs.listStatus(storeArchive);
    assertArchiveEqualToOriginal(storeFiles, archivedStoreFiles, fs);

    // Now add store files with the same names as before to check backup
    // enable archiving, make sure that files get archived
    addMockStoreFiles(2, this.masterServices, storedir);

    // Do the cleaning of the parent
    assertTrue(CatalogJanitor.cleanParent(masterServices, parent, r));
    // Cleanup procedure runs async. Wait till it is done.
    ProcedureTestingUtility.waitAllProcedures(masterServices.getMasterProcedureExecutor());
    assertFalse(fs.exists(parentDir));

    // and now check to make sure that the files have actually been archived
    archivedStoreFiles = fs.listStatus(storeArchive);
    assertArchiveEqualToOriginal(storeFiles, archivedStoreFiles, fs, true);
  }

  @Test
  public void testAlreadyRunningStatus() throws Exception {
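    // Two threads call scan() concurrently. Only one scan may run at a time, so the call that
    // finds a scan already in progress returns -1.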
    int numberOfThreads = 2;
    List<Integer> gcValues = new ArrayList<>();
    Thread[] threads = new Thread[numberOfThreads];
    for (int i = 0; i < numberOfThreads; i++) {
      threads[i] = new Thread(() -> {
        try {
          gcValues.add(janitor.scan());
        } catch (IOException e) {
          throw new RuntimeException(e);
        }
      });
    }
    for (int i = 0; i < numberOfThreads; i++) {
      threads[i].start();
    }
    for (int i = 0; i < numberOfThreads; i++) {
      threads[i].join();
    }
    assertTrue("One janitor.scan() call should have returned -1", gcValues.contains(-1));
  }

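  /**
   * Create {@code count} fake store files under {@code storedir} so there is something for the
   * archiver to move, and return their {@link FileStatus}es.
   */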
  private FileStatus[] addMockStoreFiles(int count, MasterServices services, Path storedir)
      throws IOException {
    // get the filesystem and make sure the store dir exists
    FileSystem fs = services.getMasterFileSystem().getFileSystem();
    fs.mkdirs(storedir);
    // create the store files in the parent
    for (int i = 0; i < count; i++) {
      Path storeFile = new Path(storedir, "_store" + i);
      try (FSDataOutputStream dos = fs.create(storeFile, true)) {
        dos.writeBytes("Some data: " + i);
      }
    }
    LOG.debug("Adding " + count + " store files to the storedir:" + storedir);
    // make sure the mock store files are there
    FileStatus[] storeFiles = fs.listStatus(storedir);
    assertEquals("Didn't have expected store files", count, storeFiles.length);
    return storeFiles;
  }

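  /**
   * Point the HBase root dir at a per-test directory under the testing utility's data dir,
   * wiping any leftovers from a previous run, and return the new root dir as a string.
   */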
  private String setRootDirAndCleanIt(final HBaseTestingUtility htu, final String subdir)
  throws IOException {
    Path testdir = htu.getDataTestDir(subdir);
    FileSystem fs = FileSystem.get(htu.getConfiguration());
    if (fs.exists(testdir)) {
      assertTrue(fs.delete(testdir, true));
    }
    CommonFSUtils.setRootDir(htu.getConfiguration(), testdir);
    return CommonFSUtils.getRootDir(htu.getConfiguration()).toString();
  }

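  /**
   * Write a half-store-file {@link Reference} (top or bottom half at {@code midkey}) into the
   * daughter region's store directory, named {@code timestamp.parentEncodedName} as a real split
   * reference would be, and return its path.
   */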
  private Path createReferences(final MasterServices services,
      final TableDescriptor td, final HRegionInfo parent,
      final HRegionInfo daughter, final byte [] midkey, final boolean top)
  throws IOException {
    Path rootdir = services.getMasterFileSystem().getRootDir();
    Path tabledir = CommonFSUtils.getTableDir(rootdir, parent.getTable());
    Path storedir = HStore.getStoreHomedir(tabledir, daughter,
      td.getColumnFamilies()[0].getName());
    Reference ref =
      top ? Reference.createTopReference(midkey) : Reference.createBottomReference(midkey);
    long now = System.currentTimeMillis();
    // Reference name has this format: StoreFile#REF_NAME_PARSER
    Path p = new Path(storedir, Long.toString(now) + "." + parent.getEncodedName());
    FileSystem fs = services.getMasterFileSystem().getFileSystem();
    ref.write(fs, p);
    return p;
  }

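  /**
   * Build a mocked hbase:meta {@link Result} for {@code parent} carrying {@code a} and {@code b}
   * as its split daughters.
   */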
  private Result createResult(final HRegionInfo parent, final HRegionInfo a,
      final HRegionInfo b)
  throws IOException {
    return MetaMockingUtil.getMetaTableRowResult(parent, null, a, b);
  }
}