/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.regionserver;

import static org.junit.Assert.fail;

import java.lang.reflect.Method;
import java.net.InetSocketAddress;
import java.net.URI;
import java.util.ArrayList;
import java.util.List;
import org.apache.hadoop.fs.BlockLocation;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.hbase.HBaseClassTestRule;
import org.apache.hadoop.hbase.HBaseTestingUtil;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.testclassification.MediumTests;
import org.apache.hadoop.hbase.testclassification.RegionServerTests;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.server.datanode.DataNode;
import org.apache.hadoop.util.Progressable;
import org.junit.AfterClass;
import org.junit.Assume;
import org.junit.BeforeClass;
import org.junit.ClassRule;
import org.junit.Test;
import org.junit.experimental.categories.Category;

/**
 * Tests the ability to specify favored nodes for a region.
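 * <p>
 * The test assigns favored nodes for each region via
 * HRegionServer#updateRegionFavoredNodesMapping, writes and flushes the table a few times, and
 * then verifies that every block replica of the resulting store files was placed on one of the
 * region's favored DataNodes. The whole test is skipped when the underlying HDFS version does
 * not support specifying favored nodes on file create.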
 */
@Category({ RegionServerTests.class, MediumTests.class })
public class TestRegionFavoredNodes {

  @ClassRule
  public static final HBaseClassTestRule CLASS_RULE =
    HBaseClassTestRule.forClass(TestRegionFavoredNodes.class);

  private static final HBaseTestingUtil TEST_UTIL = new HBaseTestingUtil();
  private static Table table;
  private static final TableName TABLE_NAME = TableName.valueOf("table");
  private static final byte[] COLUMN_FAMILY = Bytes.toBytes("family");
  private static final int FAVORED_NODES_NUM = 3;
  private static final int REGION_SERVERS = 6;
  private static final int FLUSHES = 3;
  private static Method createWithFavoredNode = null;

  @BeforeClass
  public static void setUpBeforeClass() throws Exception {
    // Probe for the DistributedFileSystem#create overload that accepts favored nodes. If this
    // version of HDFS does not have it, skip cluster startup; the test itself is skipped too.
    try {
      createWithFavoredNode = DistributedFileSystem.class.getDeclaredMethod("create", Path.class,
        FsPermission.class, boolean.class, int.class, short.class, long.class, Progressable.class,
        InetSocketAddress[].class);
    } catch (NoSuchMethodException nm) {
      return;
    }
    TEST_UTIL.startMiniCluster(REGION_SERVERS);
    table = TEST_UTIL.createMultiRegionTable(TABLE_NAME, COLUMN_FAMILY);
    TEST_UTIL.waitUntilAllRegionsAssigned(TABLE_NAME);
  }

  @AfterClass
  public static void tearDownAfterClass() throws Exception {
    // guard against failure in setup
    if (table != null) {
      table.close();
    }
    if (createWithFavoredNode == null) {
      return;
    }
    TEST_UTIL.shutdownMiniCluster();
  }

  @Test
  public void testFavoredNodes() throws Exception {
    Assume.assumeTrue(createWithFavoredNode != null);
    // Get the addresses of the datanodes in the cluster. Older Hadoop versions expose the
    // transfer address as getSelfAddr, newer ones as getXferAddress, so resolve it reflectively.
    InetSocketAddress[] nodes = new InetSocketAddress[REGION_SERVERS];
    List<DataNode> datanodes = TEST_UTIL.getDFSCluster().getDataNodes();
    Method selfAddress;
    try {
      selfAddress = DataNode.class.getMethod("getSelfAddr");
    } catch (NoSuchMethodException ne) {
      selfAddress = DataNode.class.getMethod("getXferAddress");
    }
    for (int i = 0; i < REGION_SERVERS; i++) {
      nodes[i] = (InetSocketAddress) selfAddress.invoke(datanodes.get(i));
    }

    String[] nodeNames = new String[REGION_SERVERS];
    for (int i = 0; i < REGION_SERVERS; i++) {
      nodeNames[i] = nodes[i].getAddress().getHostAddress() + ":" + nodes[i].getPort();
    }

    // For each region, choose some datanodes as the favored nodes then assign
    // them as favored nodes through the region.
    for (int i = 0; i < REGION_SERVERS; i++) {
      HRegionServer server = TEST_UTIL.getHBaseCluster().getRegionServer(i);
      List<HRegion> regions = server.getRegions(TABLE_NAME);
      for (HRegion region : regions) {
        List<
          org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ServerName> favoredNodes =
            new ArrayList<>(3);
        String encodedRegionName = region.getRegionInfo().getEncodedName();
        for (int j = 0; j < FAVORED_NODES_NUM; j++) {
          org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ServerName.Builder b =
            org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ServerName.newBuilder();
          b.setHostName(nodes[(i + j) % REGION_SERVERS].getAddress().getHostAddress());
          b.setPort(nodes[(i + j) % REGION_SERVERS].getPort());
          b.setStartCode(-1);
          favoredNodes.add(b.build());
        }
        server.updateRegionFavoredNodesMapping(encodedRegionName, favoredNodes);
      }
    }

    // Write some data to each region and flush. Repeat some number of times to
    // get multiple files for each region.
    for (int i = 0; i < FLUSHES; i++) {
      TEST_UTIL.loadTable(table, COLUMN_FAMILY, false);
      TEST_UTIL.flush();
    }

    // For each region, check the block locations of each file and ensure that
    // they are consistent with the favored nodes for that region.
    for (int i = 0; i < REGION_SERVERS; i++) {
      HRegionServer server = TEST_UTIL.getHBaseCluster().getRegionServer(i);
      List<HRegion> regions = server.getRegions(TABLE_NAME);
      for (HRegion region : regions) {
        List<String> files = region.getStoreFileList(new byte[][] { COLUMN_FAMILY });
        for (String file : files) {
          FileStatus status = TEST_UTIL.getDFSCluster().getFileSystem()
            .getFileStatus(new Path(new URI(file).getPath()));
          BlockLocation[] lbks = ((DistributedFileSystem) TEST_UTIL.getDFSCluster().getFileSystem())
            .getFileBlockLocations(status, 0, Long.MAX_VALUE);
          for (BlockLocation lbk : lbks) {
            locations: for (String info : lbk.getNames()) {
              for (int j = 0; j < FAVORED_NODES_NUM; j++) {
                if (info.equals(nodeNames[(i + j) % REGION_SERVERS])) {
                  continue locations;
                }
              }
              // This block was at a location that was not a favored location.
              fail("Block location " + info + " not a favored node");
            }
          }
        }
      }
    }
  }
}