/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.io.hfile;

import static org.apache.hadoop.hbase.HConstants.BUCKET_CACHE_IOENGINE_KEY;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;

import java.io.File;
import java.util.List;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseClassTestRule;
import org.apache.hadoop.hbase.HBaseTestingUtil;
import org.apache.hadoop.hbase.SingleProcessHBaseCluster;
import org.apache.hadoop.hbase.StartTestingClusterOption;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.regionserver.HRegionServer;
import org.apache.hadoop.hbase.regionserver.HStoreFile;
import org.apache.hadoop.hbase.testclassification.IOTests;
import org.apache.hadoop.hbase.testclassification.LargeTests;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.zookeeper.MiniZooKeeperCluster;
import org.junit.After;
import org.junit.Before;
import org.junit.ClassRule;
import org.junit.Test;
import org.junit.experimental.categories.Category;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

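/**
 * Tests that, with prefetch-on-open enabled and a file-backed persistent bucket cache
 * configured, the store file of a flushed table is reported in the region server's
 * cached-files list, and that the bucket cache persistence file is still present on
 * disk after the region server is stopped.
 */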
@Category({ IOTests.class, LargeTests.class })
public class TestPrefetchRSClose {

  @ClassRule
  public static final HBaseClassTestRule CLASS_RULE =
    HBaseClassTestRule.forClass(TestPrefetchRSClose.class);

  private static final Logger LOG = LoggerFactory.getLogger(TestPrefetchRSClose.class);

  private static final HBaseTestingUtil TEST_UTIL = new HBaseTestingUtil();

  private Configuration conf;
  Path testDir;
  MiniZooKeeperCluster zkCluster;
  SingleProcessHBaseCluster cluster;
  StartTestingClusterOption option =
    StartTestingClusterOption.builder().numRegionServers(1).build();

  @Before
  public void setup() throws Exception {
    conf = TEST_UTIL.getConfiguration();
    testDir = TEST_UTIL.getDataTestDir();
    TEST_UTIL.getTestFileSystem().mkdirs(testDir);

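    // Enable prefetching of blocks when store files are opened, and back the block
    // cache with a file-based bucket cache whose state is persisted to disk so the
    // cache contents can be recovered across restarts.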
    conf.setBoolean(CacheConfig.PREFETCH_BLOCKS_ON_OPEN_KEY, true);
    conf.set(BUCKET_CACHE_IOENGINE_KEY, "file:" + testDir + "/bucket.cache");
    conf.setInt("hbase.bucketcache.size", 400);
    conf.set("hbase.bucketcache.persistent.path", testDir + "/bucket.persistence");
    zkCluster = TEST_UTIL.startMiniZKCluster();
    cluster = TEST_UTIL.startMiniHBaseCluster(option);
    cluster.setConf(conf);
  }

  @Test
  public void testPrefetchPersistence() throws Exception {

    // Write to table and flush
    TableName tableName = TableName.valueOf("table1");
    byte[] row0 = Bytes.toBytes("row1");
    byte[] row1 = Bytes.toBytes("row2");
    byte[] family = Bytes.toBytes("family");
    byte[] qf1 = Bytes.toBytes("qf1");
    byte[] qf2 = Bytes.toBytes("qf2");
    byte[] value1 = Bytes.toBytes("value1");
    byte[] value2 = Bytes.toBytes("value2");

    TableDescriptor td = TableDescriptorBuilder.newBuilder(tableName)
      .setColumnFamily(ColumnFamilyDescriptorBuilder.of(family)).build();
    Table table = TEST_UTIL.createTable(td, null);
    try {
      // Put data and flush it so the resulting store file is opened and prefetched.
      Put put0 = new Put(row0);
      put0.addColumn(family, qf1, 1, value1);
      table.put(put0);
      Put put1 = new Put(row1);
      put1.addColumn(family, qf2, 1, value2);
      table.put(put1);
      TEST_UTIL.flush(tableName);
    } finally {
      table.close();
    }

    // Wait for prefetch to complete and for the bucket cache to be persisted. The
    // default interval for cache persistence is 1000ms.
    Thread.sleep(2000);

    HRegionServer regionServingRS = cluster.getRegionServer(0);

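    // The flushed store file should be the only file reported as cached by the
    // region server.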
    Admin admin = TEST_UTIL.getAdmin();
    List<String> cachedFilesList = admin.getCachedFilesList(regionServingRS.getServerName());
    assertEquals(1, cachedFilesList.size());
    for (HStoreFile h : regionServingRS.getRegions().get(0).getStores().get(0).getStorefiles()) {
      assertTrue(cachedFilesList.contains(h.getPath().getName()));
    }

    // Stop the RS and give shutdown a moment to complete, then verify the bucket
    // cache persistence file is still present on disk.
    cluster.stopRegionServer(0);
    LOG.info("Stopped Region Server 0.");
    Thread.sleep(1000);
    assertTrue(new File(testDir + "/bucket.persistence").exists());
  }

  @After
  public void tearDown() throws Exception {
    TEST_UTIL.shutdownMiniCluster();
    TEST_UTIL.cleanupDataTestDirOnTestFS(testDir.toString());
    if (zkCluster != null) {
      zkCluster.shutdown();
    }
  }
}