/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.regionserver;

import static org.junit.Assert.fail;

import java.lang.reflect.Method;
import java.net.InetSocketAddress;
import java.net.URI;
import java.util.ArrayList;
import java.util.List;
import org.apache.hadoop.fs.BlockLocation;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.hbase.HBaseClassTestRule;
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.testclassification.MediumTests;
import org.apache.hadoop.hbase.testclassification.RegionServerTests;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.server.datanode.DataNode;
import org.apache.hadoop.util.Progressable;
import org.junit.AfterClass;
import org.junit.Assume;
import org.junit.BeforeClass;
import org.junit.ClassRule;
import org.junit.Test;
import org.junit.experimental.categories.Category;

/**
 * Tests the ability to specify favored nodes for a region.
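 * <p>
 * The mini cluster is only started if the running HDFS exposes the
 * {@code DistributedFileSystem#create} overload that accepts an array of favored
 * {@code InetSocketAddress}es; otherwise setup returns early and {@link Assume}
 * causes the test to be skipped.
 * </p>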
 */
@Category({RegionServerTests.class, MediumTests.class})
public class TestRegionFavoredNodes {

  @ClassRule
  public static final HBaseClassTestRule CLASS_RULE =
      HBaseClassTestRule.forClass(TestRegionFavoredNodes.class);

  private static final HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
  private static Table table;
  private static final TableName TABLE_NAME = TableName.valueOf("table");
  private static final byte[] COLUMN_FAMILY = Bytes.toBytes("family");
  private static final int FAVORED_NODES_NUM = 3;
  private static final int REGION_SERVERS = 6;
  private static final int FLUSHES = 3;
  private static Method createWithFavoredNode = null;

  @BeforeClass
  public static void setUpBeforeClass() throws Exception {
    try {
      createWithFavoredNode = DistributedFileSystem.class.getDeclaredMethod("create", Path.class,
        FsPermission.class, boolean.class, int.class, short.class, long.class,
        Progressable.class, InetSocketAddress[].class);
    } catch (NoSuchMethodException nm) {
      return;
    }
    TEST_UTIL.startMiniCluster(REGION_SERVERS);
    table = TEST_UTIL.createMultiRegionTable(TABLE_NAME, COLUMN_FAMILY);
    TEST_UTIL.waitUntilAllRegionsAssigned(TABLE_NAME);
  }

  @AfterClass
  public static void tearDownAfterClass() throws Exception {
    // Guard against failure in setup.
    if (table != null) {
      table.close();
    }
    if (createWithFavoredNode == null) {
      return;
    }
    TEST_UTIL.shutdownMiniCluster();
  }

  @Test
  public void testFavoredNodes() throws Exception {
    Assume.assumeTrue(createWithFavoredNode != null);
    // Get the addresses of the datanodes in the cluster.
    InetSocketAddress[] nodes = new InetSocketAddress[REGION_SERVERS];
    List<DataNode> datanodes = TEST_UTIL.getDFSCluster().getDataNodes();
    Method selfAddress;
    try {
      selfAddress = DataNode.class.getMethod("getSelfAddr");
    } catch (NoSuchMethodException ne) {
      selfAddress = DataNode.class.getMethod("getXferAddress");
    }
    for (int i = 0; i < REGION_SERVERS; i++) {
      nodes[i] = (InetSocketAddress) selfAddress.invoke(datanodes.get(i));
    }

    String[] nodeNames = new String[REGION_SERVERS];
    for (int i = 0; i < REGION_SERVERS; i++) {
      nodeNames[i] = nodes[i].getAddress().getHostAddress() + ":" + nodes[i].getPort();
    }

    // For each region, choose some datanodes as the favored nodes then assign
    // them as favored nodes through the region.
    for (int i = 0; i < REGION_SERVERS; i++) {
      HRegionServer server = TEST_UTIL.getHBaseCluster().getRegionServer(i);
      List<HRegion> regions = server.getRegions(TABLE_NAME);
      for (HRegion region : regions) {
        List<org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ServerName> favoredNodes =
            new ArrayList<>(FAVORED_NODES_NUM);
        String encodedRegionName = region.getRegionInfo().getEncodedName();
        for (int j = 0; j < FAVORED_NODES_NUM; j++) {
          org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ServerName.Builder b =
              org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ServerName.newBuilder();
          b.setHostName(nodes[(i + j) % REGION_SERVERS].getAddress().getHostAddress());
          b.setPort(nodes[(i + j) % REGION_SERVERS].getPort());
          b.setStartCode(-1);
          favoredNodes.add(b.build());
        }
        server.updateRegionFavoredNodesMapping(encodedRegionName, favoredNodes);
      }
    }

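    // Favored nodes are placement hints that the region server hands to HDFS when it creates
    // new store files, so blocks written and flushed from here on should land on the datanodes
    // chosen above.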
    // Write some data to each region and flush. Repeat some number of times to
    // get multiple files for each region.
    for (int i = 0; i < FLUSHES; i++) {
      TEST_UTIL.loadTable(table, COLUMN_FAMILY, false);
      TEST_UTIL.flush();
    }

    // For each region, check the block locations of each file and ensure that
    // they are consistent with the favored nodes for that region.
    for (int i = 0; i < REGION_SERVERS; i++) {
      HRegionServer server = TEST_UTIL.getHBaseCluster().getRegionServer(i);
      List<HRegion> regions = server.getRegions(TABLE_NAME);
      for (HRegion region : regions) {
        List<String> files = region.getStoreFileList(new byte[][] { COLUMN_FAMILY });
        for (String file : files) {
          FileStatus status = TEST_UTIL.getDFSCluster().getFileSystem()
              .getFileStatus(new Path(new URI(file).getPath()));
          BlockLocation[] lbks =
              ((DistributedFileSystem) TEST_UTIL.getDFSCluster().getFileSystem())
                  .getFileBlockLocations(status, 0, Long.MAX_VALUE);
          for (BlockLocation lbk : lbks) {
            locations:
            for (String info : lbk.getNames()) {
              for (int j = 0; j < FAVORED_NODES_NUM; j++) {
                if (info.equals(nodeNames[(i + j) % REGION_SERVERS])) {
                  continue locations;
                }
              }
              // This block was at a location that was not a favored location.
              fail("Block location " + info + " not a favored node");
            }
          }
        }
      }
    }
  }
}