/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.io.hfile;

import static org.apache.hadoop.hbase.HConstants.BUCKET_CACHE_IOENGINE_KEY;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;

import java.io.File;
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseClassTestRule;
import org.apache.hadoop.hbase.HBaseTestingUtil;
import org.apache.hadoop.hbase.SingleProcessHBaseCluster;
import org.apache.hadoop.hbase.StartTestingClusterOption;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.Waiter;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.regionserver.HRegionServer;
import org.apache.hadoop.hbase.regionserver.HStoreFile;
import org.apache.hadoop.hbase.testclassification.IOTests;
import org.apache.hadoop.hbase.testclassification.LargeTests;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.zookeeper.MiniZooKeeperCluster;
import org.junit.After;
import org.junit.Before;
import org.junit.ClassRule;
import org.junit.Test;
import org.junit.experimental.categories.Category;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

@Category({ IOTests.class, LargeTests.class })
public class TestPrefetchRSClose {

  @ClassRule
  public static final HBaseClassTestRule CLASS_RULE =
    HBaseClassTestRule.forClass(TestPrefetchRSClose.class);

  private static final Logger LOG = LoggerFactory.getLogger(TestPrefetchRSClose.class);

  private static final HBaseTestingUtil TEST_UTIL = new HBaseTestingUtil();

  private Configuration conf;
  Path testDir;
  MiniZooKeeperCluster zkCluster;
  SingleProcessHBaseCluster cluster;
  StartTestingClusterOption option =
    StartTestingClusterOption.builder().numRegionServers(1).build();

  @Before
  public void setup() throws Exception {
    conf = TEST_UTIL.getConfiguration();
    testDir = TEST_UTIL.getDataTestDir();
    TEST_UTIL.getTestFileSystem().mkdirs(testDir);

    conf.setBoolean(CacheConfig.PREFETCH_BLOCKS_ON_OPEN_KEY, true);
    conf.set(BUCKET_CACHE_IOENGINE_KEY, "file:" + testDir + "/bucket.cache");
    conf.setInt("hbase.bucketcache.size", 400);
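    // Note: for "hbase.bucketcache.size", values below 1.0 are read as a fraction of max heap
    // and values of 1 or more as a capacity in megabytes, so this is a 400 MB file-backed
    // bucket cache. Setting "hbase.bucketcache.persistent.path" below tells the cache to
    // persist its index to that file on shutdown, which is what the test asserts on later.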
conf.set("hbase.bucketcache.persistent.path", testDir + "/bucket.persistence"); 084 zkCluster = TEST_UTIL.startMiniZKCluster(); 085 cluster = TEST_UTIL.startMiniHBaseCluster(option); 086 cluster.setConf(conf); 087 } 088 089 @Test 090 public void testPrefetchPersistence() throws Exception { 091 092 // Write to table and flush 093 TableName tableName = TableName.valueOf("table1"); 094 byte[] row0 = Bytes.toBytes("row1"); 095 byte[] row1 = Bytes.toBytes("row2"); 096 byte[] family = Bytes.toBytes("family"); 097 byte[] qf1 = Bytes.toBytes("qf1"); 098 byte[] qf2 = Bytes.toBytes("qf2"); 099 byte[] value1 = Bytes.toBytes("value1"); 100 byte[] value2 = Bytes.toBytes("value2"); 101 102 TableDescriptor td = TableDescriptorBuilder.newBuilder(tableName) 103 .setColumnFamily(ColumnFamilyDescriptorBuilder.of(family)).build(); 104 Table table = TEST_UTIL.createTable(td, null); 105 try { 106 // put data 107 Put put0 = new Put(row0); 108 put0.addColumn(family, qf1, 1, value1); 109 table.put(put0); 110 Put put1 = new Put(row1); 111 put1.addColumn(family, qf2, 1, value2); 112 table.put(put1); 113 TEST_UTIL.flush(tableName); 114 } finally { 115 Thread.sleep(2000); 116 } 117 118 // Default interval for cache persistence is 1000ms. So after 1000ms, both the persistence files 119 // should exist. 120 HRegionServer regionServingRS = cluster.getRegionServer(0); 121 Admin admin = TEST_UTIL.getAdmin(); 122 List<String> cachedFilesList = new ArrayList<>(); 123 Waiter.waitFor(conf, 5000, () -> { 124 try { 125 cachedFilesList.addAll(admin.getCachedFilesList(regionServingRS.getServerName())); 126 } catch (IOException e) { 127 // let the test try again 128 } 129 return cachedFilesList.size() > 0; 130 }); 131 assertEquals(1, cachedFilesList.size()); 132 for (HStoreFile h : regionServingRS.getRegions().get(0).getStores().get(0).getStorefiles()) { 133 assertTrue(cachedFilesList.contains(h.getPath().getName())); 134 } 135 136 // Stop the RS 137 cluster.stopRegionServer(0); 138 LOG.info("Stopped Region Server 0."); 139 Thread.sleep(1000); 140 assertTrue(new File(testDir + "/bucket.persistence").exists()); 141 } 142 143 @After 144 public void tearDown() throws Exception { 145 TEST_UTIL.shutdownMiniCluster(); 146 TEST_UTIL.cleanupDataTestDirOnTestFS(String.valueOf(testDir)); 147 if (zkCluster != null) { 148 zkCluster.shutdown(); 149 } 150 } 151}