/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.fs;

import static org.apache.hadoop.hbase.util.LocatedBlockHelper.getLocatedBlockLocations;
import static org.junit.jupiter.api.Assertions.assertArrayEquals;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertFalse;
import static org.junit.jupiter.api.Assertions.assertNotNull;
import static org.junit.jupiter.api.Assertions.assertTrue;

import java.lang.reflect.Field;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseTestingUtil;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.SingleProcessHBaseCluster;
import org.apache.hadoop.hbase.testclassification.LargeTests;
import org.apache.hadoop.hbase.testclassification.MiscTests;
import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
import org.apache.hadoop.hbase.wal.AbstractFSWALProvider;
import org.apache.hadoop.hdfs.DFSClient;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.protocol.ClientProtocol;
import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
import org.junit.jupiter.api.AfterEach;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Tag;
import org.junit.jupiter.api.Test;

/**
 * Tests for the hdfs fix from HBASE-6435. Please don't add new subtest which involves starting /
 * stopping MiniDFSCluster in this class. When stopping MiniDFSCluster, shutdown hooks would be
 * cleared in hadoop's ShutdownHookManager in hadoop 3. This leads to 'Failed suppression of fs
 * shutdown hook' error in region server.
 */
@Tag(MiscTests.TAG)
@Tag(LargeTests.TAG)
public class TestBlockReorderBlockLocation {

  private Configuration conf;
  private MiniDFSCluster cluster;
  private HBaseTestingUtil htu;
  private DistributedFileSystem dfs;
  // Hostnames assigned to the three mini-cluster datanodes; host1 is also used
  // below to build the pseudo WAL path whose blocks should be reordered away from it.
  private static final String host1 = "host1";
  private static final String host2 = "host2";
  private static final String host3 = "host3";

  /**
   * Starts a 3-datanode MiniDFSCluster with known racks and hostnames so block
   * placement (and reordering) is deterministic enough to assert on.
   */
  @BeforeEach
  public void setUp() throws Exception {
    htu = new HBaseTestingUtil();
    htu.getConfiguration().setInt("dfs.blocksize", 1024);// For the test with multiple blocks
    htu.getConfiguration().setInt("dfs.replication", 3);
    htu.startMiniDFSCluster(3, new String[] { "/r1", "/r2", "/r3" },
      new String[] { host1, host2, host3 });

    conf = htu.getConfiguration();
    cluster = htu.getDFSCluster();
    dfs = (DistributedFileSystem) FileSystem.get(conf);
  }

  /**
   * Shuts down the whole mini cluster after each test. Runs per-test (@AfterEach),
   * hence the instance method; renamed from the misleading 'tearDownAfterClass',
   * which suggested static @AfterAll semantics it never had.
   */
  @AfterEach
  public void tearDown() throws Exception {
    htu.shutdownMiniCluster();
  }

  /**
   * Extracts the private 'namenode' proxy from a DFSClient via reflection, so the
   * test can ask the namenode for raw (un-reordered) block locations directly.
   */
  private static ClientProtocol getNamenode(DFSClient dfsc) throws Exception {
    Field nf = DFSClient.class.getDeclaredField("namenode");
    nf.setAccessible(true);
    return (ClientProtocol) nf.get(dfsc);
  }

  /**
   * Test that the reorder algo works as we expect. Verifies three things, ten times:
   * (1) a non-WAL path is left untouched by ReorderWALBlocks, (2) a path shaped like
   * a WAL dir for host1 yields a reorder that pushes host1's replica last, and
   * (3) reordering is idempotent.
   */
  @Test
  public void testBlockLocation() throws Exception {
    // We need to start HBase to get HConstants.HBASE_DIR set in conf
    htu.startMiniZKCluster();
    SingleProcessHBaseCluster hbm = htu.startMiniHBaseCluster();
    conf = hbm.getConfiguration();

    // The "/" is mandatory, without it we've got a null pointer exception on the namenode
    final String fileName = "/helloWorld";
    Path p = new Path(fileName);

    final int repCount = 3;
    // Plain int comparison; the old '(short)' narrowing cast was a no-op.
    assertTrue(cluster.getDataNodes().size() >= repCount);

    // Let's write the file
    FSDataOutputStream fop = dfs.create(p, (short) repCount);
    final double toWrite = 875.5613;
    fop.writeDouble(toWrite);
    fop.close();

    for (int i = 0; i < 10; i++) {
      // The interceptor is not set in this test, so we get the raw list at this point
      LocatedBlocks lbs;
      // Replication is asynchronous: poll (up to 10s) until all repCount replicas are reported.
      final long max = EnvironmentEdgeManager.currentTime() + 10000;
      do {
        lbs = getNamenode(dfs.getClient()).getBlockLocations(fileName, 0, 1);
        assertNotNull(lbs.getLocatedBlocks());
        assertEquals(1, lbs.getLocatedBlocks().size());
        assertTrue(EnvironmentEdgeManager.currentTime() < max,
          "Expecting " + repCount + " , got " + getLocatedBlockLocations(lbs.get(0)).length);
      } while (getLocatedBlockLocations(lbs.get(0)).length != repCount);

      // Should be filtered, the name is different => The order won't change
      Object[] originalList = lbs.getLocatedBlocks().toArray();
      HFileSystem.ReorderWALBlocks lrb = new HFileSystem.ReorderWALBlocks();
      lrb.reorderBlocks(conf, lbs, fileName);
      assertArrayEquals(originalList, lbs.getLocatedBlocks().toArray());

      // Should be reordered, as we pretend to be a file name with a compliant stuff
      assertNotNull(conf.get(HConstants.HBASE_DIR));
      assertFalse(conf.get(HConstants.HBASE_DIR).isEmpty());
      String pseudoLogFile = conf.get(HConstants.HBASE_DIR) + "/" + HConstants.HREGION_LOGDIR_NAME
        + "/" + host1 + ",6977,6576" + "/mylogfile";

      // Check that it will be possible to extract a ServerName from our construction
      assertNotNull(
        AbstractFSWALProvider.getServerNameFromWALDirectoryName(dfs.getConf(), pseudoLogFile),
        "log= " + pseudoLogFile);

      // And check we're doing the right reorder: the WAL host's replica must end up last.
      lrb.reorderBlocks(conf, lbs, pseudoLogFile);
      assertEquals(host1, getLocatedBlockLocations(lbs.get(0))[2].getHostName());

      // Check again, it should remain the same (reorder is idempotent).
      lrb.reorderBlocks(conf, lbs, pseudoLogFile);
      assertEquals(host1, getLocatedBlockLocations(lbs.get(0))[2].getHostName());
    }
  }

}