001/** 002 * Licensed to the Apache Software Foundation (ASF) under one 003 * or more contributor license agreements. See the NOTICE file 004 * distributed with this work for additional information 005 * regarding copyright ownership. The ASF licenses this file 006 * to you under the Apache License, Version 2.0 (the 007 * "License"); you may not use this file except in compliance 008 * with the License. You may obtain a copy of the License at 009 * 010 * http://www.apache.org/licenses/LICENSE-2.0 011 * 012 * Unless required by applicable law or agreed to in writing, software 013 * distributed under the License is distributed on an "AS IS" BASIS, 014 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 015 * See the License for the specific language governing permissions and 016 * limitations under the License. 017 */ 018package org.apache.hadoop.hbase.regionserver; 019 020import static org.junit.Assert.assertEquals; 021import static org.junit.Assert.assertFalse; 022import static org.junit.Assert.assertNotNull; 023import static org.junit.Assert.assertNull; 024import static org.junit.Assert.assertTrue; 025 026import java.io.IOException; 027import java.net.URI; 028import java.util.Collection; 029import java.util.List; 030import org.apache.hadoop.conf.Configuration; 031import org.apache.hadoop.fs.FSDataInputStream; 032import org.apache.hadoop.fs.FSDataOutputStream; 033import org.apache.hadoop.fs.FileStatus; 034import org.apache.hadoop.fs.FileSystem; 035import org.apache.hadoop.fs.Path; 036import org.apache.hadoop.fs.permission.FsPermission; 037import org.apache.hadoop.hbase.HBaseClassTestRule; 038import org.apache.hadoop.hbase.HBaseTestingUtility; 039import org.apache.hadoop.hbase.HColumnDescriptor; 040import org.apache.hadoop.hbase.TableName; 041import org.apache.hadoop.hbase.client.Admin; 042import org.apache.hadoop.hbase.client.HTable; 043import org.apache.hadoop.hbase.client.Put; 044import org.apache.hadoop.hbase.client.RegionInfo; 045import 
org.apache.hadoop.hbase.client.RegionInfoBuilder;
import org.apache.hadoop.hbase.fs.HFileSystem;
import org.apache.hadoop.hbase.testclassification.MediumTests;
import org.apache.hadoop.hbase.testclassification.RegionServerTests;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.FSUtils;
import org.apache.hadoop.util.Progressable;
import org.junit.ClassRule;
import org.junit.Rule;
import org.junit.Test;
import org.junit.experimental.categories.Category;
import org.junit.rules.TestName;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

/**
 * Tests for {@link HRegionFileSystem}: per-family block storage policy handling, on-disk
 * region creation/open/delete, retry behavior of non-idempotent filesystem operations,
 * and the temp-dir/commit flow for store files.
 */
@Category({RegionServerTests.class, MediumTests.class})
public class TestHRegionFileSystem {

  @ClassRule
  public static final HBaseClassTestRule CLASS_RULE =
      HBaseClassTestRule.forClass(TestHRegionFileSystem.class);

  private static HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
  private static final Logger LOG = LoggerFactory.getLogger(TestHRegionFileSystem.class);

  public static final byte[] FAMILY_NAME = Bytes.toBytes("info");
  // Two column families ("info-A", "info-B") so storage policies can be set per family.
  private static final byte[][] FAMILIES = {
    Bytes.add(FAMILY_NAME, Bytes.toBytes("-A")),
    Bytes.add(FAMILY_NAME, Bytes.toBytes("-B")) };
  private static final TableName TABLE_NAME = TableName.valueOf("TestTable");

  @Rule
  public TestName name = new TestName();

  /**
   * Verifies the block storage policy can be set three ways, in increasing precedence:
   * cluster configuration ({@link HStore#BLOCK_STORAGE_POLICY_KEY}), column-family schema,
   * and the raw {@link HRegionFileSystem#setStoragePolicy} API. Also checks that flushed
   * store files and the store temp dir inherit the family directory's policy.
   */
  @Test
  public void testBlockStoragePolicy() throws Exception {
    TEST_UTIL = new HBaseTestingUtility();
    Configuration conf = TEST_UTIL.getConfiguration();
    TEST_UTIL.startMiniCluster();
    HTable table = (HTable) TEST_UTIL.createTable(TABLE_NAME, FAMILIES);
    assertEquals("Should start with empty table", 0, TEST_UTIL.countRows(table));
    HRegionFileSystem regionFs = getHRegionFS(table, conf);
    // the original block storage policy would be HOT
    String spA = regionFs.getStoragePolicyName(Bytes.toString(FAMILIES[0]));
    String spB = regionFs.getStoragePolicyName(Bytes.toString(FAMILIES[1]));
    LOG.debug("Storage policy of cf 0: [" + spA + "].");
    LOG.debug("Storage policy of cf 1: [" + spB + "].");
    assertEquals("HOT", spA);
    assertEquals("HOT", spB);

    // Recreate table and make sure storage policy could be set through configuration.
    // FIX: close the first table before tearing down the cluster it belongs to,
    // instead of leaking it when the variable is reassigned below.
    table.close();
    TEST_UTIL.shutdownMiniCluster();
    TEST_UTIL.getConfiguration().set(HStore.BLOCK_STORAGE_POLICY_KEY, "WARM");
    TEST_UTIL.startMiniCluster();
    table = (HTable) TEST_UTIL.createTable(TABLE_NAME, FAMILIES);
    regionFs = getHRegionFS(table, conf);

    try (Admin admin = TEST_UTIL.getConnection().getAdmin()) {
      spA = regionFs.getStoragePolicyName(Bytes.toString(FAMILIES[0]));
      spB = regionFs.getStoragePolicyName(Bytes.toString(FAMILIES[1]));
      LOG.debug("Storage policy of cf 0: [" + spA + "].");
      LOG.debug("Storage policy of cf 1: [" + spB + "].");
      assertEquals("WARM", spA);
      assertEquals("WARM", spB);

      // alter table cf schema to change storage policies
      // and make sure it could override settings in conf
      HColumnDescriptor hcdA = new HColumnDescriptor(Bytes.toString(FAMILIES[0]));
      // alter through setting HStore#BLOCK_STORAGE_POLICY_KEY in HColumnDescriptor
      hcdA.setValue(HStore.BLOCK_STORAGE_POLICY_KEY, "ONE_SSD");
      admin.modifyColumnFamily(TABLE_NAME, hcdA);
      while (TEST_UTIL.getMiniHBaseCluster().getMaster().getAssignmentManager()
          .getRegionStates().hasRegionsInTransition()) {
        Thread.sleep(200);
        LOG.debug("Waiting on table to finish schema altering");
      }
      // alter through HColumnDescriptor#setStoragePolicy
      HColumnDescriptor hcdB = new HColumnDescriptor(Bytes.toString(FAMILIES[1]));
      hcdB.setStoragePolicy("ALL_SSD");
      admin.modifyColumnFamily(TABLE_NAME, hcdB);
      while (TEST_UTIL.getMiniHBaseCluster().getMaster().getAssignmentManager().getRegionStates()
          .hasRegionsInTransition()) {
        Thread.sleep(200);
        LOG.debug("Waiting on table to finish schema altering");
      }
      spA = regionFs.getStoragePolicyName(Bytes.toString(FAMILIES[0]));
      spB = regionFs.getStoragePolicyName(Bytes.toString(FAMILIES[1]));
      LOG.debug("Storage policy of cf 0: [" + spA + "].");
      LOG.debug("Storage policy of cf 1: [" + spB + "].");
      assertNotNull(spA);
      assertEquals("ONE_SSD", spA);
      assertNotNull(spB);
      assertEquals("ALL_SSD", spB);

      // flush memstore snapshot into 3 files
      for (long i = 0; i < 3; i++) {
        Put put = new Put(Bytes.toBytes(i));
        put.addColumn(FAMILIES[0], Bytes.toBytes(i), Bytes.toBytes(i));
        table.put(put);
        admin.flush(TABLE_NAME);
      }
      // there should be 3 files in store dir
      FileSystem fs = TEST_UTIL.getDFSCluster().getFileSystem();
      Path storePath = regionFs.getStoreDir(Bytes.toString(FAMILIES[0]));
      FileStatus[] storeFiles = FSUtils.listStatus(fs, storePath);
      assertNotNull(storeFiles);
      assertEquals(3, storeFiles.length);
      // store temp dir still exists but empty (listStatus returns null for an empty dir)
      Path storeTempDir = new Path(regionFs.getTempDir(), Bytes.toString(FAMILIES[0]));
      assertTrue(fs.exists(storeTempDir));
      FileStatus[] tempFiles = FSUtils.listStatus(fs, storeTempDir);
      assertNull(tempFiles);
      // storage policy of cf temp dir and 3 store files should be ONE_SSD
      assertEquals("ONE_SSD",
        ((HFileSystem) regionFs.getFileSystem()).getStoragePolicyName(storeTempDir));
      for (FileStatus status : storeFiles) {
        assertEquals("ONE_SSD",
          ((HFileSystem) regionFs.getFileSystem()).getStoragePolicyName(status.getPath()));
      }

      // change storage policies by calling raw api directly
      regionFs.setStoragePolicy(Bytes.toString(FAMILIES[0]), "ALL_SSD");
      regionFs.setStoragePolicy(Bytes.toString(FAMILIES[1]), "ONE_SSD");
      spA = regionFs.getStoragePolicyName(Bytes.toString(FAMILIES[0]));
      spB = regionFs.getStoragePolicyName(Bytes.toString(FAMILIES[1]));
      LOG.debug("Storage policy of cf 0: [" + spA + "].");
      LOG.debug("Storage policy of cf 1: [" + spB + "].");
      assertNotNull(spA);
      assertEquals("ALL_SSD", spA);
      assertNotNull(spB);
      assertEquals("ONE_SSD", spB);
    } finally {
      table.close();
      TEST_UTIL.deleteTable(TABLE_NAME);
      TEST_UTIL.shutdownMiniCluster();
    }
  }

  /**
   * Builds an {@link HRegionFileSystem} for the single region of the given table, asserting
   * the expected on-disk layout: exactly one region dir containing one dir per family.
   */
  private HRegionFileSystem getHRegionFS(HTable table, Configuration conf) throws IOException {
    FileSystem fs = TEST_UTIL.getDFSCluster().getFileSystem();
    Path tableDir = FSUtils.getTableDir(TEST_UTIL.getDefaultRootDirPath(), table.getName());
    List<Path> regionDirs = FSUtils.getRegionDirs(fs, tableDir);
    assertEquals(1, regionDirs.size());
    List<Path> familyDirs = FSUtils.getFamilyDirs(fs, regionDirs.get(0));
    assertEquals(2, familyDirs.size());
    RegionInfo hri = table.getRegionLocator().getAllRegionLocations().get(0).getRegionInfo();
    HRegionFileSystem regionFs = new HRegionFileSystem(conf, new HFileSystem(fs), tableDir, hri);
    return regionFs;
  }

  /**
   * Region lifecycle on disk: create writes the region dir and a readable .regioninfo,
   * open finds the same dir, and delete removes it.
   */
  @Test
  public void testOnDiskRegionCreation() throws IOException {
    Path rootDir = TEST_UTIL.getDataTestDirOnTestFS("testOnDiskRegionCreation");
    FileSystem fs = TEST_UTIL.getTestFileSystem();
    Configuration conf = TEST_UTIL.getConfiguration();

    // Create a Region
    RegionInfo hri = RegionInfoBuilder.newBuilder(TableName.valueOf(name.getMethodName())).build();
    HRegionFileSystem regionFs = HRegionFileSystem.createRegionOnFileSystem(conf, fs,
      FSUtils.getTableDir(rootDir, hri.getTable()), hri);

    // Verify if the region is on disk
    Path regionDir = regionFs.getRegionDir();
    assertTrue("The region folder should be created", fs.exists(regionDir));

    // Verify the .regioninfo
    RegionInfo hriVerify = HRegionFileSystem.loadRegionInfoFileContent(fs, regionDir);
    assertEquals(hri, hriVerify);

    // Open the region
    regionFs = HRegionFileSystem.openRegionFromFileSystem(conf, fs,
      FSUtils.getTableDir(rootDir, hri.getTable()), hri, false);
    assertEquals(regionDir, regionFs.getRegionDir());

    // Delete the region
    HRegionFileSystem.deleteRegionFromFileSystem(conf, fs,
      FSUtils.getTableDir(rootDir, hri.getTable()), hri);
    assertFalse("The region folder should be removed", fs.exists(regionDir));

    fs.delete(rootDir, true);
  }

  /**
   * Verifies that createDir/rename/deleteDir retry on transient filesystem failures:
   * {@link MockFileSystem} throws on the first three attempts of each op, so success here
   * proves the HRegionFileSystem wrappers kept retrying past the failures.
   */
  @Test
  public void testNonIdempotentOpsWithRetries() throws IOException {
    // FIX: was the copy-pasted name "testOnDiskRegionCreation", which collided with the
    // directory used (and deleted) by that other test.
    Path rootDir = TEST_UTIL.getDataTestDirOnTestFS("testNonIdempotentOpsWithRetries");
    FileSystem fs = TEST_UTIL.getTestFileSystem();
    Configuration conf = TEST_UTIL.getConfiguration();

    // Create a Region
    RegionInfo hri = RegionInfoBuilder.newBuilder(TableName.valueOf(name.getMethodName())).build();
    HRegionFileSystem regionFs = HRegionFileSystem.createRegionOnFileSystem(conf, fs, rootDir, hri);
    assertTrue(fs.exists(regionFs.getRegionDir()));

    // exists() is hard-wired to false so createDir must actually call mkdirs and retry it
    regionFs = new HRegionFileSystem(conf, new MockFileSystemForCreate(), null, null);
    boolean result = regionFs.createDir(new Path("/foo/bar"));
    assertTrue("Couldn't create the directory", result);

    regionFs = new HRegionFileSystem(conf, new MockFileSystem(), null, null);
    result = regionFs.rename(new Path("/foo/bar"), new Path("/foo/bar2"));
    assertTrue("Couldn't rename the directory", result);

    regionFs = new HRegionFileSystem(conf, new MockFileSystem(), null, null);
    result = regionFs.deleteDir(new Path("/foo/bar"));
    assertTrue("Couldn't delete the directory", result);
    fs.delete(rootDir, true);
  }

  /** A {@link MockFileSystem} whose paths never exist, forcing createDir to call mkdirs. */
  static class MockFileSystemForCreate extends MockFileSystem {
    @Override
    public boolean exists(Path path) {
      return false;
    }
  }

  /**
   * A mock fs which throws an exception for the first 3 calls to each mutating op, and then
   * processes the call (returns the expected result). Used to exercise retry logic.
   */
  static class MockFileSystem extends FileSystem {
    // Shared attempt counter; each op throws until it passes successRetryCount.
    int retryCount;
    final static int successRetryCount = 3;

    public MockFileSystem() {
      retryCount = 0;
    }

    @Override
    public FSDataOutputStream append(Path arg0, int arg1, Progressable arg2) throws IOException {
      throw new IOException("");
    }

    @Override
    public FSDataOutputStream create(Path arg0, FsPermission arg1, boolean arg2, int arg3,
        short arg4, long arg5, Progressable arg6) throws IOException {
      LOG.debug("Create, " + retryCount);
      if (retryCount++ < successRetryCount) {
        throw new IOException("Something bad happen");
      }
      return null;
    }

    @Override
    public boolean delete(Path arg0) throws IOException {
      if (retryCount++ < successRetryCount) {
        throw new IOException("Something bad happen");
      }
      return true;
    }

    @Override
    public boolean delete(Path arg0, boolean arg1) throws IOException {
      if (retryCount++ < successRetryCount) {
        throw new IOException("Something bad happen");
      }
      return true;
    }

    @Override
    public FileStatus getFileStatus(Path arg0) throws IOException {
      FileStatus fs = new FileStatus();
      return fs;
    }

    @Override
    public boolean exists(Path path) {
      return true;
    }

    @Override
    public URI getUri() {
      throw new RuntimeException("Something bad happen");
    }

    @Override
    public Path getWorkingDirectory() {
      throw new RuntimeException("Something bad happen");
    }

    @Override
    public FileStatus[] listStatus(Path arg0) throws IOException {
      throw new IOException("Something bad happen");
    }

    @Override
    public boolean mkdirs(Path arg0, FsPermission arg1) throws IOException {
      LOG.debug("mkdirs, " + retryCount);
      if (retryCount++ < successRetryCount) {
        throw new IOException("Something bad happen");
      }
      return true;
    }

    @Override
    public FSDataInputStream open(Path arg0, int arg1) throws IOException {
      throw new IOException("Something bad happen");
    }

    @Override
    public boolean rename(Path arg0, Path arg1) throws IOException {
      LOG.debug("rename, " + retryCount);
      if (retryCount++ < successRetryCount) {
        throw new IOException("Something bad happen");
      }
      return true;
    }

    @Override
    public void setWorkingDirectory(Path arg0) {
      throw new RuntimeException("Something bad happen");
    }
  }

  /**
   * Store-file temp/commit flow: files created under the region temp dir are invisible to
   * getStoreFiles until committed, and commit moves (not copies) the file into the family dir.
   */
  @Test
  public void testTempAndCommit() throws IOException {
    Path rootDir = TEST_UTIL.getDataTestDirOnTestFS("testTempAndCommit");
    FileSystem fs = TEST_UTIL.getTestFileSystem();
    Configuration conf = TEST_UTIL.getConfiguration();

    // Create a Region
    String familyName = "cf";
    RegionInfo hri = RegionInfoBuilder.newBuilder(TableName.valueOf(name.getMethodName())).build();
    HRegionFileSystem regionFs = HRegionFileSystem.createRegionOnFileSystem(conf, fs, rootDir, hri);

    // New region, no store files
    Collection<StoreFileInfo> storeFiles = regionFs.getStoreFiles(familyName);
    assertEquals(0, storeFiles != null ? storeFiles.size() : 0);

    // Create a new file in temp (no files in the family)
    Path buildPath = regionFs.createTempName();
    fs.createNewFile(buildPath);
    storeFiles = regionFs.getStoreFiles(familyName);
    assertEquals(0, storeFiles != null ? storeFiles.size() : 0);

    // commit the file
    Path dstPath = regionFs.commitStoreFile(familyName, buildPath);
    storeFiles = regionFs.getStoreFiles(familyName);
    assertEquals(0, storeFiles != null ? storeFiles.size() : 0);
    assertFalse(fs.exists(buildPath));
    // FIX: dstPath was previously unused — assert the commit actually moved the file there.
    assertTrue(fs.exists(dstPath));

    fs.delete(rootDir, true);
  }
}