/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.regionserver;

import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertNull;
import static org.junit.Assert.assertTrue;

import java.io.IOException;
import java.net.URI;
import java.util.Collection;
import java.util.List;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.hbase.HBaseClassTestRule;
import org.apache.hadoop.hbase.HBaseTestingUtil;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.RegionInfo;
import org.apache.hadoop.hbase.client.RegionInfoBuilder;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.fs.HFileSystem;
import org.apache.hadoop.hbase.testclassification.LargeTests;
import org.apache.hadoop.hbase.testclassification.RegionServerTests;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.CommonFSUtils;
import org.apache.hadoop.hbase.util.FSUtils;
import org.apache.hadoop.util.Progressable;
import org.junit.ClassRule;
import org.junit.Rule;
import org.junit.Test;
import org.junit.experimental.categories.Category;
import org.junit.rules.TestName;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

@Category({ RegionServerTests.class, LargeTests.class })
public class TestHRegionFileSystem {

  @ClassRule
  public static final HBaseClassTestRule CLASS_RULE =
    HBaseClassTestRule.forClass(TestHRegionFileSystem.class);

  private static HBaseTestingUtil TEST_UTIL = new HBaseTestingUtil();
  private static final Logger LOG = LoggerFactory.getLogger(TestHRegionFileSystem.class);

  public static final byte[] FAMILY_NAME = Bytes.toBytes("info");
  private static final byte[][] FAMILIES =
    { Bytes.add(FAMILY_NAME, Bytes.toBytes("-A")), Bytes.add(FAMILY_NAME, Bytes.toBytes("-B")) };
  private static final TableName TABLE_NAME = TableName.valueOf("TestTable");

  @Rule
  public TestName name = new TestName();
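  /**
   * Verify that the block storage policy of a store can be set through the
   * {@link HStore#BLOCK_STORAGE_POLICY_KEY} configuration, overridden per column family through
   * the table schema, and changed directly through the {@link HRegionFileSystem} API.
   */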
  @Test
  public void testBlockStoragePolicy() throws Exception {
    TEST_UTIL = new HBaseTestingUtil();
    Configuration conf = TEST_UTIL.getConfiguration();
    TEST_UTIL.startMiniCluster();
    Table table = TEST_UTIL.createTable(TABLE_NAME, FAMILIES);
    assertEquals("Should start with empty table", 0, TEST_UTIL.countRows(table));
    HRegionFileSystem regionFs = getHRegionFS(TEST_UTIL.getConnection(), table, conf);
    // the default block storage policy is HOT
    String spA = regionFs.getStoragePolicyName(Bytes.toString(FAMILIES[0]));
    String spB = regionFs.getStoragePolicyName(Bytes.toString(FAMILIES[1]));
    LOG.debug("Storage policy of cf 0: [" + spA + "].");
    LOG.debug("Storage policy of cf 1: [" + spB + "].");
    assertEquals("HOT", spA);
    assertEquals("HOT", spB);

    // Recreate the table and make sure the storage policy can be set through configuration
    TEST_UTIL.shutdownMiniCluster();
    TEST_UTIL.getConfiguration().set(HStore.BLOCK_STORAGE_POLICY_KEY, "WARM");
    TEST_UTIL.startMiniCluster();
    table = TEST_UTIL.createTable(TABLE_NAME, FAMILIES);
    regionFs = getHRegionFS(TEST_UTIL.getConnection(), table, conf);

    try (Admin admin = TEST_UTIL.getConnection().getAdmin()) {
      spA = regionFs.getStoragePolicyName(Bytes.toString(FAMILIES[0]));
      spB = regionFs.getStoragePolicyName(Bytes.toString(FAMILIES[1]));
      LOG.debug("Storage policy of cf 0: [" + spA + "].");
      LOG.debug("Storage policy of cf 1: [" + spB + "].");
      assertEquals("WARM", spA);
      assertEquals("WARM", spB);

      // alter the table cf schema to change storage policies
      // and make sure it overrides the setting in conf
      ColumnFamilyDescriptorBuilder cfdA = ColumnFamilyDescriptorBuilder.newBuilder(FAMILIES[0]);
      // alter through setting HStore#BLOCK_STORAGE_POLICY_KEY in the column family descriptor
      cfdA.setValue(HStore.BLOCK_STORAGE_POLICY_KEY, "ONE_SSD");
      admin.modifyColumnFamily(TABLE_NAME, cfdA.build());
      while (
        TEST_UTIL.getMiniHBaseCluster().getMaster().getAssignmentManager().getRegionStates()
          .hasRegionsInTransition()
      ) {
        Thread.sleep(200);
        LOG.debug("Waiting on table to finish schema altering");
      }
      // alter through ColumnFamilyDescriptorBuilder#setStoragePolicy
      ColumnFamilyDescriptorBuilder cfdB = ColumnFamilyDescriptorBuilder.newBuilder(FAMILIES[1]);
      cfdB.setStoragePolicy("ALL_SSD");
      admin.modifyColumnFamily(TABLE_NAME, cfdB.build());
      while (
        TEST_UTIL.getMiniHBaseCluster().getMaster().getAssignmentManager().getRegionStates()
          .hasRegionsInTransition()
      ) {
        Thread.sleep(200);
        LOG.debug("Waiting on table to finish schema altering");
      }
      spA = regionFs.getStoragePolicyName(Bytes.toString(FAMILIES[0]));
      spB = regionFs.getStoragePolicyName(Bytes.toString(FAMILIES[1]));
      LOG.debug("Storage policy of cf 0: [" + spA + "].");
      LOG.debug("Storage policy of cf 1: [" + spB + "].");
      assertNotNull(spA);
      assertEquals("ONE_SSD", spA);
      assertNotNull(spB);
      assertEquals("ALL_SSD", spB);

      // flush memstore snapshot into 3 files
      for (long i = 0; i < 3; i++) {
        Put put = new Put(Bytes.toBytes(i));
        put.addColumn(FAMILIES[0], Bytes.toBytes(i), Bytes.toBytes(i));
        table.put(put);
        admin.flush(TABLE_NAME);
      }
      // there should be 3 files in the store dir
      FileSystem fs = TEST_UTIL.getDFSCluster().getFileSystem();
      Path storePath = regionFs.getStoreDir(Bytes.toString(FAMILIES[0]));
      FileStatus[] storeFiles = CommonFSUtils.listStatus(fs, storePath);
      assertNotNull(storeFiles);
      assertEquals(3, storeFiles.length);
      // the store temp dir still exists but is empty
      Path storeTempDir = new Path(regionFs.getTempDir(), Bytes.toString(FAMILIES[0]));
      assertTrue(fs.exists(storeTempDir));
      FileStatus[] tempFiles = CommonFSUtils.listStatus(fs, storeTempDir);
      assertNull(tempFiles);
      // the storage policy of the cf temp dir and the 3 store files should be ONE_SSD
      assertEquals("ONE_SSD",
        ((HFileSystem) regionFs.getFileSystem()).getStoragePolicyName(storeTempDir));
      for (FileStatus status : storeFiles) {
        assertEquals("ONE_SSD",
          ((HFileSystem) regionFs.getFileSystem()).getStoragePolicyName(status.getPath()));
      }

      // change storage policies by calling the raw api directly
      regionFs.setStoragePolicy(Bytes.toString(FAMILIES[0]), "ALL_SSD");
      regionFs.setStoragePolicy(Bytes.toString(FAMILIES[1]), "ONE_SSD");
      spA = regionFs.getStoragePolicyName(Bytes.toString(FAMILIES[0]));
      spB = regionFs.getStoragePolicyName(Bytes.toString(FAMILIES[1]));
      LOG.debug("Storage policy of cf 0: [" + spA + "].");
      LOG.debug("Storage policy of cf 1: [" + spB + "].");
      assertNotNull(spA);
      assertEquals("ALL_SSD", spA);
      assertNotNull(spB);
      assertEquals("ONE_SSD", spB);
    } finally {
      table.close();
      TEST_UTIL.deleteTable(TABLE_NAME);
      TEST_UTIL.shutdownMiniCluster();
    }
  }

  private HRegionFileSystem getHRegionFS(Connection conn, Table table, Configuration conf)
    throws IOException {
    FileSystem fs = TEST_UTIL.getDFSCluster().getFileSystem();
    Path tableDir = CommonFSUtils.getTableDir(TEST_UTIL.getDefaultRootDirPath(), table.getName());
    List<Path> regionDirs = FSUtils.getRegionDirs(fs, tableDir);
    assertEquals(1, regionDirs.size());
    List<Path> familyDirs = FSUtils.getFamilyDirs(fs, regionDirs.get(0));
    assertEquals(2, familyDirs.size());
    RegionInfo hri =
      conn.getRegionLocator(table.getName()).getAllRegionLocations().get(0).getRegion();
    HRegionFileSystem regionFs = new HRegionFileSystem(conf, new HFileSystem(fs), tableDir, hri);
    return regionFs;
  }
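  /**
   * Verify that a region created on the filesystem has the expected region directory and
   * .regioninfo file, can be reopened from the filesystem, and can be deleted again.
   */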
  @Test
  public void testOnDiskRegionCreation() throws IOException {
    Path rootDir = TEST_UTIL.getDataTestDirOnTestFS(name.getMethodName());
    FileSystem fs = TEST_UTIL.getTestFileSystem();
    Configuration conf = TEST_UTIL.getConfiguration();

    // Create a Region
    RegionInfo hri = RegionInfoBuilder.newBuilder(TableName.valueOf(name.getMethodName())).build();
    HRegionFileSystem regionFs = HRegionFileSystem.createRegionOnFileSystem(conf, fs,
      CommonFSUtils.getTableDir(rootDir, hri.getTable()), hri);

    // Verify that the region is on disk
    Path regionDir = regionFs.getRegionDir();
    assertTrue("The region folder should be created", fs.exists(regionDir));

    // Verify the .regioninfo
    RegionInfo hriVerify = HRegionFileSystem.loadRegionInfoFileContent(fs, regionDir);
    assertEquals(hri, hriVerify);

    // Open the region
    regionFs = HRegionFileSystem.openRegionFromFileSystem(conf, fs,
      CommonFSUtils.getTableDir(rootDir, hri.getTable()), hri, false);
    assertEquals(regionDir, regionFs.getRegionDir());

    // Delete the region
    HRegionFileSystem.deleteRegionFromFileSystem(conf, fs,
      CommonFSUtils.getTableDir(rootDir, hri.getTable()), hri);
    assertFalse("The region folder should be removed", fs.exists(regionDir));

    fs.delete(rootDir, true);
  }
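  /**
   * Verify that the non-idempotent createDir, rename and deleteDir operations retry on transient
   * filesystem errors, using mock filesystems that fail the first few calls before succeeding.
   */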
  @Test
  public void testNonIdempotentOpsWithRetries() throws IOException {
    Path rootDir = TEST_UTIL.getDataTestDirOnTestFS(name.getMethodName());
    FileSystem fs = TEST_UTIL.getTestFileSystem();
    Configuration conf = TEST_UTIL.getConfiguration();

    // Create a Region
    RegionInfo hri = RegionInfoBuilder.newBuilder(TableName.valueOf(name.getMethodName())).build();
    HRegionFileSystem regionFs = HRegionFileSystem.createRegionOnFileSystem(conf, fs, rootDir, hri);
    assertTrue(fs.exists(regionFs.getRegionDir()));

    regionFs = new HRegionFileSystem(conf, new MockFileSystemForCreate(), rootDir, hri);
    boolean result = regionFs.createDir(new Path("/foo/bar"));
    assertTrue("Couldn't create the directory", result);

    regionFs = new HRegionFileSystem(conf, new MockFileSystem(), rootDir, hri);
    result = regionFs.rename(new Path("/foo/bar"), new Path("/foo/bar2"));
    assertTrue("Couldn't rename the directory", result);

    regionFs = new HRegionFileSystem(conf, new MockFileSystem(), rootDir, hri);
    result = regionFs.deleteDir(new Path("/foo/bar"));
    assertTrue("Couldn't delete the directory", result);
    fs.delete(rootDir, true);
  }

  static class MockFileSystemForCreate extends MockFileSystem {
    @Override
    public boolean exists(Path path) {
      return false;
    }
  }

  /**
   * A mock fs which throws an exception for the first 3 calls, then processes the call (returns
   * the expected result).
   */
  static class MockFileSystem extends FileSystem {
    int retryCount;
    static final int successRetryCount = 3;

    public MockFileSystem() {
      retryCount = 0;
    }

    @Override
    public FSDataOutputStream append(Path arg0, int arg1, Progressable arg2) throws IOException {
      throw new IOException("");
    }

    @Override
    public FSDataOutputStream create(Path arg0, FsPermission arg1, boolean arg2, int arg3,
      short arg4, long arg5, Progressable arg6) throws IOException {
      LOG.debug("Create, " + retryCount);
      if (retryCount++ < successRetryCount) {
        throw new IOException("Something bad happened");
      }
      return null;
    }

    @Override
    public boolean delete(Path arg0) throws IOException {
      if (retryCount++ < successRetryCount) {
        throw new IOException("Something bad happened");
      }
      return true;
    }

    @Override
    public boolean delete(Path arg0, boolean arg1) throws IOException {
      if (retryCount++ < successRetryCount) {
        throw new IOException("Something bad happened");
      }
      return true;
    }

    @Override
    public FileStatus getFileStatus(Path arg0) throws IOException {
      FileStatus fs = new FileStatus();
      return fs;
    }

    @Override
    public boolean exists(Path path) {
      return true;
    }

    @Override
    public URI getUri() {
      throw new RuntimeException("Something bad happened");
    }

    @Override
    public Path getWorkingDirectory() {
      throw new RuntimeException("Something bad happened");
    }

    @Override
    public FileStatus[] listStatus(Path arg0) throws IOException {
      throw new IOException("Something bad happened");
    }

    @Override
    public boolean mkdirs(Path arg0, FsPermission arg1) throws IOException {
      LOG.debug("mkdirs, " + retryCount);
      if (retryCount++ < successRetryCount) {
        throw new IOException("Something bad happened");
      }
      return true;
    }

    @Override
    public FSDataInputStream open(Path arg0, int arg1) throws IOException {
      throw new IOException("Something bad happened");
    }

    @Override
    public boolean rename(Path arg0, Path arg1) throws IOException {
      LOG.debug("rename, " + retryCount);
      if (retryCount++ < successRetryCount) {
        throw new IOException("Something bad happened");
      }
      return true;
    }

    @Override
    public void setWorkingDirectory(Path arg0) {
      throw new RuntimeException("Something bad happened");
    }
  }
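  /**
   * Verify that a file created under the region's temp directory is not listed as a store file,
   * and that committing it moves the file out of the temp directory.
   */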
  @Test
  public void testTempAndCommit() throws IOException {
    Path rootDir = TEST_UTIL.getDataTestDirOnTestFS("testTempAndCommit");
    FileSystem fs = TEST_UTIL.getTestFileSystem();
    Configuration conf = TEST_UTIL.getConfiguration();

    // Create a Region
    String familyName = "cf";

    RegionInfo hri = RegionInfoBuilder.newBuilder(TableName.valueOf(name.getMethodName())).build();
    HRegionFileSystem regionFs = HRegionFileSystem.createRegionOnFileSystem(conf, fs, rootDir, hri);

    // New region, no store files
    Collection<StoreFileInfo> storeFiles = regionFs.getStoreFiles(familyName);
    assertEquals(0, storeFiles != null ? storeFiles.size() : 0);

    // Create a new file in temp (no files in the family)
    Path buildPath = regionFs.createTempName();
    fs.createNewFile(buildPath);
    storeFiles = regionFs.getStoreFiles(familyName);
    assertEquals(0, storeFiles != null ? storeFiles.size() : 0);

    // commit the file
    Path dstPath = regionFs.commitStoreFile(familyName, buildPath);
    storeFiles = regionFs.getStoreFiles(familyName);
    assertEquals(0, storeFiles != null ? storeFiles.size() : 0);
    assertFalse(fs.exists(buildPath));

    fs.delete(rootDir, true);
  }
}