001/* 002 * Licensed to the Apache Software Foundation (ASF) under one 003 * or more contributor license agreements. See the NOTICE file 004 * distributed with this work for additional information 005 * regarding copyright ownership. The ASF licenses this file 006 * to you under the Apache License, Version 2.0 (the 007 * "License"); you may not use this file except in compliance 008 * with the License. You may obtain a copy of the License at 009 * 010 * http://www.apache.org/licenses/LICENSE-2.0 011 * 012 * Unless required by applicable law or agreed to in writing, software 013 * distributed under the License is distributed on an "AS IS" BASIS, 014 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 015 * See the License for the specific language governing permissions and 016 * limitations under the License. 017 */ 018package org.apache.hadoop.hbase.regionserver; 019 020import static org.junit.Assert.assertEquals; 021import static org.junit.Assert.assertFalse; 022import static org.junit.Assert.assertNotNull; 023import static org.junit.Assert.assertNull; 024import static org.junit.Assert.assertTrue; 025 026import java.io.IOException; 027import java.net.URI; 028import java.util.List; 029import org.apache.hadoop.conf.Configuration; 030import org.apache.hadoop.fs.FSDataInputStream; 031import org.apache.hadoop.fs.FSDataOutputStream; 032import org.apache.hadoop.fs.FileStatus; 033import org.apache.hadoop.fs.FileSystem; 034import org.apache.hadoop.fs.Path; 035import org.apache.hadoop.fs.permission.FsPermission; 036import org.apache.hadoop.hbase.HBaseClassTestRule; 037import org.apache.hadoop.hbase.HBaseTestingUtil; 038import org.apache.hadoop.hbase.TableName; 039import org.apache.hadoop.hbase.client.Admin; 040import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder; 041import org.apache.hadoop.hbase.client.Connection; 042import org.apache.hadoop.hbase.client.Put; 043import org.apache.hadoop.hbase.client.RegionInfo; 044import org.apache.hadoop.hbase.client.RegionInfoBuilder; 045import org.apache.hadoop.hbase.client.Table; 046import org.apache.hadoop.hbase.fs.HFileSystem; 047import org.apache.hadoop.hbase.regionserver.storefiletracker.StoreFileTracker; 048import org.apache.hadoop.hbase.regionserver.storefiletracker.StoreFileTrackerFactory; 049import org.apache.hadoop.hbase.testclassification.LargeTests; 050import org.apache.hadoop.hbase.testclassification.RegionServerTests; 051import org.apache.hadoop.hbase.util.Bytes; 052import org.apache.hadoop.hbase.util.CommonFSUtils; 053import org.apache.hadoop.hbase.util.FSUtils; 054import org.apache.hadoop.util.Progressable; 055import org.junit.ClassRule; 056import org.junit.Rule; 057import org.junit.Test; 058import org.junit.experimental.categories.Category; 059import org.junit.rules.TestName; 060import org.slf4j.Logger; 061import org.slf4j.LoggerFactory; 062 063@Category({ RegionServerTests.class, LargeTests.class }) 064public class TestHRegionFileSystem { 065 066 @ClassRule 067 public static final HBaseClassTestRule CLASS_RULE = 068 HBaseClassTestRule.forClass(TestHRegionFileSystem.class); 069 070 private static HBaseTestingUtil TEST_UTIL = new HBaseTestingUtil(); 071 private static final Logger LOG = LoggerFactory.getLogger(TestHRegionFileSystem.class); 072 073 public static final byte[] FAMILY_NAME = Bytes.toBytes("info"); 074 private static final byte[][] FAMILIES = 075 { Bytes.add(FAMILY_NAME, Bytes.toBytes("-A")), Bytes.add(FAMILY_NAME, Bytes.toBytes("-B")) }; 076 private static final TableName TABLE_NAME = TableName.valueOf("TestTable"); 077 078 @Rule 079 public TestName name = new TestName(); 080 081 @Test 082 public void testBlockStoragePolicy() throws Exception { 083 TEST_UTIL = new HBaseTestingUtil(); 084 Configuration conf = TEST_UTIL.getConfiguration(); 085 TEST_UTIL.startMiniCluster(); 086 Table table = TEST_UTIL.createTable(TABLE_NAME, FAMILIES); 087 assertEquals("Should start with empty table", 0, TEST_UTIL.countRows(table)); 088 HRegionFileSystem regionFs = getHRegionFS(TEST_UTIL.getConnection(), table, conf); 089 // the original block storage policy would be HOT 090 String spA = regionFs.getStoragePolicyName(Bytes.toString(FAMILIES[0])); 091 String spB = regionFs.getStoragePolicyName(Bytes.toString(FAMILIES[1])); 092 LOG.debug("Storage policy of cf 0: [" + spA + "]."); 093 LOG.debug("Storage policy of cf 1: [" + spB + "]."); 094 assertEquals("HOT", spA); 095 assertEquals("HOT", spB); 096 097 // Recreate table and make sure storage policy could be set through configuration 098 TEST_UTIL.shutdownMiniCluster(); 099 TEST_UTIL.getConfiguration().set(HStore.BLOCK_STORAGE_POLICY_KEY, "WARM"); 100 TEST_UTIL.startMiniCluster(); 101 table = TEST_UTIL.createTable(TABLE_NAME, FAMILIES); 102 regionFs = getHRegionFS(TEST_UTIL.getConnection(), table, conf); 103 104 try (Admin admin = TEST_UTIL.getConnection().getAdmin()) { 105 spA = regionFs.getStoragePolicyName(Bytes.toString(FAMILIES[0])); 106 spB = regionFs.getStoragePolicyName(Bytes.toString(FAMILIES[1])); 107 LOG.debug("Storage policy of cf 0: [" + spA + "]."); 108 LOG.debug("Storage policy of cf 1: [" + spB + "]."); 109 assertEquals("WARM", spA); 110 assertEquals("WARM", spB); 111 112 // alter table cf schema to change storage policies 113 // and make sure it could override settings in conf 114 ColumnFamilyDescriptorBuilder cfdA = ColumnFamilyDescriptorBuilder.newBuilder(FAMILIES[0]); 115 // alter through setting HStore#BLOCK_STORAGE_POLICY_KEY in HColumnDescriptor 116 cfdA.setValue(HStore.BLOCK_STORAGE_POLICY_KEY, "ONE_SSD"); 117 admin.modifyColumnFamily(TABLE_NAME, cfdA.build()); 118 while ( 119 TEST_UTIL.getMiniHBaseCluster().getMaster().getAssignmentManager().hasRegionsInTransition() 120 ) { 121 Thread.sleep(200); 122 LOG.debug("Waiting on table to finish schema altering"); 123 } 124 // alter through HColumnDescriptor#setStoragePolicy 125 ColumnFamilyDescriptorBuilder cfdB = ColumnFamilyDescriptorBuilder.newBuilder(FAMILIES[1]); 126 cfdB.setStoragePolicy("ALL_SSD"); 127 admin.modifyColumnFamily(TABLE_NAME, cfdB.build()); 128 while ( 129 TEST_UTIL.getMiniHBaseCluster().getMaster().getAssignmentManager().hasRegionsInTransition() 130 ) { 131 Thread.sleep(200); 132 LOG.debug("Waiting on table to finish schema altering"); 133 } 134 spA = regionFs.getStoragePolicyName(Bytes.toString(FAMILIES[0])); 135 spB = regionFs.getStoragePolicyName(Bytes.toString(FAMILIES[1])); 136 LOG.debug("Storage policy of cf 0: [" + spA + "]."); 137 LOG.debug("Storage policy of cf 1: [" + spB + "]."); 138 assertNotNull(spA); 139 assertEquals("ONE_SSD", spA); 140 assertNotNull(spB); 141 assertEquals("ALL_SSD", spB); 142 143 // flush memstore snapshot into 3 files 144 for (long i = 0; i < 3; i++) { 145 Put put = new Put(Bytes.toBytes(i)); 146 put.addColumn(FAMILIES[0], Bytes.toBytes(i), Bytes.toBytes(i)); 147 table.put(put); 148 admin.flush(TABLE_NAME); 149 } 150 // there should be 3 files in store dir 151 FileSystem fs = TEST_UTIL.getDFSCluster().getFileSystem(); 152 Path storePath = regionFs.getStoreDir(Bytes.toString(FAMILIES[0])); 153 FileStatus[] storeFiles = CommonFSUtils.listStatus(fs, storePath); 154 assertNotNull(storeFiles); 155 assertEquals(3, storeFiles.length); 156 // store temp dir still exists but empty 157 Path storeTempDir = new Path(regionFs.getTempDir(), Bytes.toString(FAMILIES[0])); 158 assertTrue(fs.exists(storeTempDir)); 159 FileStatus[] tempFiles = CommonFSUtils.listStatus(fs, storeTempDir); 160 assertNull(tempFiles); 161 // storage policy of cf temp dir and 3 store files should be ONE_SSD 162 assertEquals("ONE_SSD", 163 ((HFileSystem) regionFs.getFileSystem()).getStoragePolicyName(storeTempDir)); 164 for (FileStatus status : storeFiles) { 165 assertEquals("ONE_SSD", 166 ((HFileSystem) regionFs.getFileSystem()).getStoragePolicyName(status.getPath())); 167 } 168 169 // change storage policies by calling raw api directly 170 regionFs.setStoragePolicy(Bytes.toString(FAMILIES[0]), "ALL_SSD"); 171 regionFs.setStoragePolicy(Bytes.toString(FAMILIES[1]), "ONE_SSD"); 172 spA = regionFs.getStoragePolicyName(Bytes.toString(FAMILIES[0])); 173 spB = regionFs.getStoragePolicyName(Bytes.toString(FAMILIES[1])); 174 LOG.debug("Storage policy of cf 0: [" + spA + "]."); 175 LOG.debug("Storage policy of cf 1: [" + spB + "]."); 176 assertNotNull(spA); 177 assertEquals("ALL_SSD", spA); 178 assertNotNull(spB); 179 assertEquals("ONE_SSD", spB); 180 } finally { 181 table.close(); 182 TEST_UTIL.deleteTable(TABLE_NAME); 183 TEST_UTIL.shutdownMiniCluster(); 184 } 185 } 186 187 private HRegionFileSystem getHRegionFS(Connection conn, Table table, Configuration conf) 188 throws IOException { 189 FileSystem fs = TEST_UTIL.getDFSCluster().getFileSystem(); 190 Path tableDir = CommonFSUtils.getTableDir(TEST_UTIL.getDefaultRootDirPath(), table.getName()); 191 List<Path> regionDirs = FSUtils.getRegionDirs(fs, tableDir); 192 assertEquals(1, regionDirs.size()); 193 List<Path> familyDirs = FSUtils.getFamilyDirs(fs, regionDirs.get(0)); 194 assertEquals(2, familyDirs.size()); 195 RegionInfo hri = 196 conn.getRegionLocator(table.getName()).getAllRegionLocations().get(0).getRegion(); 197 HRegionFileSystem regionFs = new HRegionFileSystem(conf, new HFileSystem(fs), tableDir, hri); 198 return regionFs; 199 } 200 201 @Test 202 public void testOnDiskRegionCreation() throws IOException { 203 Path rootDir = TEST_UTIL.getDataTestDirOnTestFS(name.getMethodName()); 204 FileSystem fs = TEST_UTIL.getTestFileSystem(); 205 Configuration conf = TEST_UTIL.getConfiguration(); 206 207 // Create a Region 208 RegionInfo hri = RegionInfoBuilder.newBuilder(TableName.valueOf(name.getMethodName())).build(); 209 HRegionFileSystem regionFs = HRegionFileSystem.createRegionOnFileSystem(conf, fs, 210 CommonFSUtils.getTableDir(rootDir, hri.getTable()), hri); 211 212 // Verify if the region is on disk 213 Path regionDir = regionFs.getRegionDir(); 214 assertTrue("The region folder should be created", fs.exists(regionDir)); 215 216 // Verify the .regioninfo 217 RegionInfo hriVerify = HRegionFileSystem.loadRegionInfoFileContent(fs, regionDir); 218 assertEquals(hri, hriVerify); 219 220 // Open the region 221 regionFs = HRegionFileSystem.openRegionFromFileSystem(conf, fs, 222 CommonFSUtils.getTableDir(rootDir, hri.getTable()), hri, false); 223 assertEquals(regionDir, regionFs.getRegionDir()); 224 225 // Delete the region 226 HRegionFileSystem.deleteRegionFromFileSystem(conf, fs, 227 CommonFSUtils.getTableDir(rootDir, hri.getTable()), hri); 228 assertFalse("The region folder should be removed", fs.exists(regionDir)); 229 230 fs.delete(rootDir, true); 231 } 232 233 @Test 234 public void testNonIdempotentOpsWithRetries() throws IOException { 235 Path rootDir = TEST_UTIL.getDataTestDirOnTestFS(name.getMethodName()); 236 FileSystem fs = TEST_UTIL.getTestFileSystem(); 237 Configuration conf = TEST_UTIL.getConfiguration(); 238 239 // Create a Region 240 RegionInfo hri = RegionInfoBuilder.newBuilder(TableName.valueOf(name.getMethodName())).build(); 241 HRegionFileSystem regionFs = HRegionFileSystem.createRegionOnFileSystem(conf, fs, rootDir, hri); 242 assertTrue(fs.exists(regionFs.getRegionDir())); 243 244 regionFs = new HRegionFileSystem(conf, new MockFileSystemForCreate(), rootDir, hri); 245 boolean result = regionFs.createDir(new Path("/foo/bar")); 246 assertTrue("Couldn't create the directory", result); 247 248 regionFs = new HRegionFileSystem(conf, new MockFileSystem(), rootDir, hri); 249 result = regionFs.rename(new Path("/foo/bar"), new Path("/foo/bar2")); 250 assertTrue("Couldn't rename the directory", result); 251 252 regionFs = new HRegionFileSystem(conf, new MockFileSystem(), rootDir, hri); 253 result = regionFs.deleteDir(new Path("/foo/bar")); 254 assertTrue("Couldn't delete the directory", result); 255 fs.delete(rootDir, true); 256 } 257 258 static class MockFileSystemForCreate extends MockFileSystem { 259 @Override 260 public boolean exists(Path path) { 261 return false; 262 } 263 } 264 265 /** 266 * a mock fs which throws exception for first 3 times, and then process the call (returns the 267 * excepted result). 268 */ 269 static class MockFileSystem extends FileSystem { 270 int retryCount; 271 final static int successRetryCount = 3; 272 273 public MockFileSystem() { 274 retryCount = 0; 275 } 276 277 @Override 278 public FSDataOutputStream append(Path arg0, int arg1, Progressable arg2) throws IOException { 279 throw new IOException(""); 280 } 281 282 @Override 283 public FSDataOutputStream create(Path arg0, FsPermission arg1, boolean arg2, int arg3, 284 short arg4, long arg5, Progressable arg6) throws IOException { 285 LOG.debug("Create, " + retryCount); 286 if (retryCount++ < successRetryCount) throw new IOException("Something bad happen"); 287 return null; 288 } 289 290 @Override 291 public boolean delete(Path arg0) throws IOException { 292 if (retryCount++ < successRetryCount) throw new IOException("Something bad happen"); 293 return true; 294 } 295 296 @Override 297 public boolean delete(Path arg0, boolean arg1) throws IOException { 298 if (retryCount++ < successRetryCount) throw new IOException("Something bad happen"); 299 return true; 300 } 301 302 @Override 303 public FileStatus getFileStatus(Path arg0) throws IOException { 304 FileStatus fs = new FileStatus(); 305 return fs; 306 } 307 308 @Override 309 public boolean exists(Path path) { 310 return true; 311 } 312 313 @Override 314 public URI getUri() { 315 throw new RuntimeException("Something bad happen"); 316 } 317 318 @Override 319 public Path getWorkingDirectory() { 320 throw new RuntimeException("Something bad happen"); 321 } 322 323 @Override 324 public FileStatus[] listStatus(Path arg0) throws IOException { 325 throw new IOException("Something bad happen"); 326 } 327 328 @Override 329 public boolean mkdirs(Path arg0, FsPermission arg1) throws IOException { 330 LOG.debug("mkdirs, " + retryCount); 331 if (retryCount++ < successRetryCount) throw new IOException("Something bad happen"); 332 return true; 333 } 334 335 @Override 336 public FSDataInputStream open(Path arg0, int arg1) throws IOException { 337 throw new IOException("Something bad happen"); 338 } 339 340 @Override 341 public boolean rename(Path arg0, Path arg1) throws IOException { 342 LOG.debug("rename, " + retryCount); 343 if (retryCount++ < successRetryCount) throw new IOException("Something bad happen"); 344 return true; 345 } 346 347 @Override 348 public void setWorkingDirectory(Path arg0) { 349 throw new RuntimeException("Something bad happen"); 350 } 351 } 352 353 @Test 354 public void testTempAndCommit() throws IOException { 355 Path rootDir = TEST_UTIL.getDataTestDirOnTestFS("testTempAndCommit"); 356 FileSystem fs = TEST_UTIL.getTestFileSystem(); 357 Configuration conf = TEST_UTIL.getConfiguration(); 358 359 // Create a Region 360 String familyName = "cf"; 361 362 RegionInfo hri = RegionInfoBuilder.newBuilder(TableName.valueOf(name.getMethodName())).build(); 363 HRegionFileSystem regionFs = HRegionFileSystem.createRegionOnFileSystem(conf, fs, rootDir, hri); 364 StoreContext storeContext = StoreContext.getBuilder() 365 .withColumnFamilyDescriptor(ColumnFamilyDescriptorBuilder.of(familyName)) 366 .withFamilyStoreDirectoryPath( 367 new Path(regionFs.getTableDir(), new Path(hri.getRegionNameAsString(), familyName))) 368 .withRegionFileSystem(regionFs).build(); 369 StoreFileTracker sft = StoreFileTrackerFactory.create(conf, false, storeContext); 370 // New region, no store files 371 List<StoreFileInfo> storeFiles = sft.load(); 372 assertEquals(0, storeFiles != null ? storeFiles.size() : 0); 373 374 // Create a new file in temp (no files in the family) 375 Path buildPath = regionFs.createTempName(); 376 fs.createNewFile(buildPath); 377 storeFiles = sft.load(); 378 assertEquals(0, storeFiles != null ? storeFiles.size() : 0); 379 380 // commit the file 381 Path dstPath = regionFs.commitStoreFile(familyName, buildPath); 382 storeFiles = sft.load(); 383 assertEquals(0, storeFiles != null ? storeFiles.size() : 0); 384 assertFalse(fs.exists(buildPath)); 385 386 fs.delete(rootDir, true); 387 } 388}