/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.favored;

import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertNotEquals;
import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertTrue;
import static org.mockito.ArgumentMatchers.any;
import static org.mockito.Mockito.when;

import java.io.IOException;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.NavigableMap;
import java.util.Set;
import java.util.TreeMap;
import org.apache.hadoop.hbase.HBaseClassTestRule;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.RegionInfo;
import org.apache.hadoop.hbase.client.RegionInfoBuilder;
import org.apache.hadoop.hbase.master.RackManager;
import org.apache.hadoop.hbase.testclassification.MasterTests;
import org.apache.hadoop.hbase.testclassification.MediumTests;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
import org.apache.hadoop.hbase.util.Triple;
import org.junit.BeforeClass;
import org.junit.ClassRule;
import org.junit.Rule;
import org.junit.Test;
import org.junit.experimental.categories.Category;
import org.junit.rules.TestName;
import org.mockito.Mockito;

import org.apache.hbase.thirdparty.com.google.common.collect.Lists;
import org.apache.hbase.thirdparty.com.google.common.collect.Sets;

@Category({ MasterTests.class, MediumTests.class })
public class TestFavoredNodeAssignmentHelper {

  @ClassRule
  public static final HBaseClassTestRule CLASS_RULE =
    HBaseClassTestRule.forClass(TestFavoredNodeAssignmentHelper.class);

  private static List<ServerName> servers = new ArrayList<>();
  private static Map<String, List<ServerName>> rackToServers = new HashMap<>();
  private static RackManager rackManager = Mockito.mock(RackManager.class);

  // Some tests have randomness, so we run them multiple times
  private static final int MAX_ATTEMPTS = 100;

  @Rule
  public TestName name = new TestName();

  private static String getRack(int index) {
    if (index < 10) {
      return "rack1";
    } else if (index < 20) {
      return "rack2";
    } else if (index < 30) {
      return "rack3";
    } else {
      return RackManager.UNKNOWN_RACK;
    }
  }

  @BeforeClass
  public static void setupBeforeClass() throws Exception {
    // Set up some server -> rack mappings
    // Have three racks in the cluster with 10 hosts each.
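    // The mocked RackManager below resolves a server's rack from the numeric suffix of its
    // "fooN" hostname, mirroring getRack(int) above: 0-9 map to rack1, 10-19 to rack2,
    // 20-29 to rack3, and anything else to UNKNOWN_RACK.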
    when(rackManager.getRack(any(ServerName.class))).then(invocation -> {
      ServerName sn = invocation.getArgument(0, ServerName.class);
      try {
        int i = Integer.parseInt(sn.getHostname().substring("foo".length()));
        return getRack(i);
      } catch (NumberFormatException e) {
        return RackManager.UNKNOWN_RACK;
      }
    });
    for (int i = 0; i < 40; i++) {
      ServerName server = ServerName.valueOf("foo" + i, 1234, EnvironmentEdgeManager.currentTime());
      String rack = getRack(i);
      if (!rack.equals(RackManager.UNKNOWN_RACK)) {
        rackToServers.computeIfAbsent(rack, k -> new ArrayList<>()).add(server);
      }
      servers.add(server);
    }
  }

  // The tests decide which racks to work with, and how many machines to
  // work with from any given rack.
  // Return the first 'count' servers from each requested rack (the selection is deterministic,
  // not random).
  private static List<ServerName> getServersFromRack(Map<String, Integer> rackToServerCount) {
    List<ServerName> chosenServers = new ArrayList<>();
    for (Map.Entry<String, Integer> entry : rackToServerCount.entrySet()) {
      List<ServerName> servers = rackToServers.get(entry.getKey());
      for (int i = 0; i < entry.getValue(); i++) {
        chosenServers.add(servers.get(i));
      }
    }
    return chosenServers;
  }

  @Test
  public void testSmallCluster() {
    // Test the case where we cannot assign favored nodes (because there are
    // too few nodes in the cluster)
    Map<String, Integer> rackToServerCount = new HashMap<>();
    rackToServerCount.put("rack1", 2);
    List<ServerName> servers = getServersFromRack(rackToServerCount);
    FavoredNodeAssignmentHelper helper = new FavoredNodeAssignmentHelper(servers, rackManager);
    helper.initialize();
    assertFalse(helper.canPlaceFavoredNodes());
  }

  @Test
  public void testPlacePrimaryRSAsRoundRobin() {
    // Test the regular case where there are many servers in different racks.
    // Test once for few regions and once for many regions.
    primaryRSPlacement(6, null, 10, 10, 10);
    // now create lots of regions and try to place them on the limited number of machines
    primaryRSPlacement(600, null, 10, 10, 10);
  }

  @Test
  public void testRoundRobinAssignmentsWithUnevenSizedRacks() {
    // In the case of uneven racks, the regions should be distributed
    // proportionately to the rack sizes
    primaryRSPlacement(6, null, 10, 10, 10);
    primaryRSPlacement(600, null, 10, 10, 5);
    primaryRSPlacement(600, null, 10, 5, 10);
    primaryRSPlacement(600, null, 5, 10, 10);
    primaryRSPlacement(500, null, 10, 10, 5);
    primaryRSPlacement(500, null, 10, 5, 10);
    primaryRSPlacement(500, null, 5, 10, 10);
    primaryRSPlacement(500, null, 9, 7, 8);
    primaryRSPlacement(500, null, 8, 7, 9);
    primaryRSPlacement(500, null, 7, 9, 8);
    primaryRSPlacement(459, null, 7, 9, 8);
  }

  @Test
  public void testSecondaryAndTertiaryPlacementWithSingleRack() {
    // Test the case where there is a single rack and we need to choose
    // Primary/Secondary/Tertiary from a single rack.
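    // secondaryAndTertiaryRSPlacementHelper (defined further down) returns a Triple of the
    // primary assignment map, the initialized FavoredNodeAssignmentHelper and the generated
    // regions; the getFirst/getSecond/getThird calls below just unpack those three pieces.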
    Map<String, Integer> rackToServerCount = new HashMap<>();
    rackToServerCount.put("rack1", 10);
    // have lots of regions to test with
    Triple<Map<RegionInfo, ServerName>, FavoredNodeAssignmentHelper,
      List<RegionInfo>> primaryRSMapAndHelper =
        secondaryAndTertiaryRSPlacementHelper(60000, rackToServerCount);
    FavoredNodeAssignmentHelper helper = primaryRSMapAndHelper.getSecond();
    Map<RegionInfo, ServerName> primaryRSMap = primaryRSMapAndHelper.getFirst();
    List<RegionInfo> regions = primaryRSMapAndHelper.getThird();
    Map<RegionInfo, ServerName[]> secondaryAndTertiaryMap =
      helper.placeSecondaryAndTertiaryRS(primaryRSMap);
    // although we created lots of regions we should have no overlap on the
    // primary/secondary/tertiary for any given region
    for (RegionInfo region : regions) {
      ServerName[] secondaryAndTertiaryServers = secondaryAndTertiaryMap.get(region);
      assertNotNull(secondaryAndTertiaryServers);
      assertTrue(primaryRSMap.containsKey(region));
      assertTrue(!secondaryAndTertiaryServers[0].equals(primaryRSMap.get(region)));
      assertTrue(!secondaryAndTertiaryServers[1].equals(primaryRSMap.get(region)));
      assertTrue(!secondaryAndTertiaryServers[0].equals(secondaryAndTertiaryServers[1]));
    }
  }

  @Test
  public void testSecondaryAndTertiaryPlacementWithSingleServer() {
    // Test the case where we have a single node in the cluster. In this case
    // the primary can be assigned but the secondary/tertiary would be null
    Map<String, Integer> rackToServerCount = new HashMap<>();
    rackToServerCount.put("rack1", 1);
    Triple<Map<RegionInfo, ServerName>, FavoredNodeAssignmentHelper,
      List<RegionInfo>> primaryRSMapAndHelper =
        secondaryAndTertiaryRSPlacementHelper(1, rackToServerCount);
    FavoredNodeAssignmentHelper helper = primaryRSMapAndHelper.getSecond();
    Map<RegionInfo, ServerName> primaryRSMap = primaryRSMapAndHelper.getFirst();
    List<RegionInfo> regions = primaryRSMapAndHelper.getThird();

    Map<RegionInfo, ServerName[]> secondaryAndTertiaryMap =
      helper.placeSecondaryAndTertiaryRS(primaryRSMap);
    // no secondary/tertiary placement in case of a single RegionServer
    assertTrue(secondaryAndTertiaryMap.get(regions.get(0)) == null);
  }

  @Test
  public void testSecondaryAndTertiaryPlacementWithMultipleRacks() {
    // Test the case where we have multiple racks and the region servers
    // belong to multiple racks
    Map<String, Integer> rackToServerCount = new HashMap<>();
    rackToServerCount.put("rack1", 10);
    rackToServerCount.put("rack2", 10);

    Triple<Map<RegionInfo, ServerName>, FavoredNodeAssignmentHelper,
      List<RegionInfo>> primaryRSMapAndHelper =
        secondaryAndTertiaryRSPlacementHelper(60000, rackToServerCount);
    FavoredNodeAssignmentHelper helper = primaryRSMapAndHelper.getSecond();
    Map<RegionInfo, ServerName> primaryRSMap = primaryRSMapAndHelper.getFirst();

    assertTrue(primaryRSMap.size() == 60000);
    Map<RegionInfo, ServerName[]> secondaryAndTertiaryMap =
      helper.placeSecondaryAndTertiaryRS(primaryRSMap);
    assertTrue(secondaryAndTertiaryMap.size() == 60000);
    // for every region, the primary should be on one rack and the secondary/tertiary
    // on another (we create a lot of regions just to increase probability of failure)
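    // Only two racks are in play here, so the three favored nodes can span at most two racks;
    // the assertion below therefore just checks that they are not all on the primary's rack.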
    for (Map.Entry<RegionInfo, ServerName[]> entry : secondaryAndTertiaryMap.entrySet()) {
      ServerName[] allServersForRegion = entry.getValue();
      String primaryRSRack = rackManager.getRack(primaryRSMap.get(entry.getKey()));
      String secondaryRSRack = rackManager.getRack(allServersForRegion[0]);
      String tertiaryRSRack = rackManager.getRack(allServersForRegion[1]);
      Set<String> racks = Sets.newHashSet(primaryRSRack);
      racks.add(secondaryRSRack);
      racks.add(tertiaryRSRack);
      assertTrue(racks.size() >= 2);
    }
  }

  @Test
  public void testSecondaryAndTertiaryPlacementWithLessThanTwoServersInRacks() {
    // Test the case where we have two racks but with fewer than two servers in each.
    // We will not have enough machines to select secondary/tertiary
    Map<String, Integer> rackToServerCount = new HashMap<>();
    rackToServerCount.put("rack1", 1);
    rackToServerCount.put("rack2", 1);
    Triple<Map<RegionInfo, ServerName>, FavoredNodeAssignmentHelper,
      List<RegionInfo>> primaryRSMapAndHelper =
        secondaryAndTertiaryRSPlacementHelper(6, rackToServerCount);
    FavoredNodeAssignmentHelper helper = primaryRSMapAndHelper.getSecond();
    Map<RegionInfo, ServerName> primaryRSMap = primaryRSMapAndHelper.getFirst();
    List<RegionInfo> regions = primaryRSMapAndHelper.getThird();
    assertTrue(primaryRSMap.size() == 6);
    Map<RegionInfo, ServerName[]> secondaryAndTertiaryMap =
      helper.placeSecondaryAndTertiaryRS(primaryRSMap);
    for (RegionInfo region : regions) {
      // not enough secondary/tertiary room to place the regions
      assertTrue(secondaryAndTertiaryMap.get(region) == null);
    }
  }

  @Test
  public void testSecondaryAndTertiaryPlacementWithMoreThanOneServerInPrimaryRack() {
    // Test the case where there is only one server in one rack and another rack
    // has more servers. We try to choose secondary/tertiary on a different
    // rack from the primary. But if the other rack doesn't have enough nodes
    // for both the secondary and tertiary RSs, the tertiary is placed on the
    // same rack as the primary server.
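    // Per getServersFromRack, rack1 contributes foo0 and foo1 while rack2 contributes only
    // foo10, so when the primary lands on rack1 the other rack cannot supply both remaining
    // favored nodes on its own.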
    Map<String, Integer> rackToServerCount = new HashMap<>();
    rackToServerCount.put("rack1", 2);
    rackToServerCount.put("rack2", 1);
    Triple<Map<RegionInfo, ServerName>, FavoredNodeAssignmentHelper,
      List<RegionInfo>> primaryRSMapAndHelper =
        secondaryAndTertiaryRSPlacementHelper(6, rackToServerCount);
    FavoredNodeAssignmentHelper helper = primaryRSMapAndHelper.getSecond();
    Map<RegionInfo, ServerName> primaryRSMap = primaryRSMapAndHelper.getFirst();
    List<RegionInfo> regions = primaryRSMapAndHelper.getThird();
    assertTrue(primaryRSMap.size() == 6);
    Map<RegionInfo, ServerName[]> secondaryAndTertiaryMap =
      helper.placeSecondaryAndTertiaryRS(primaryRSMap);
    assertTrue(secondaryAndTertiaryMap.size() == regions.size());
    for (RegionInfo region : regions) {
      ServerName s = primaryRSMap.get(region);
      ServerName secondaryRS = secondaryAndTertiaryMap.get(region)[0];
      ServerName tertiaryRS = secondaryAndTertiaryMap.get(region)[1];
      Set<String> racks = Sets.newHashSet(rackManager.getRack(s));
      racks.add(rackManager.getRack(secondaryRS));
      racks.add(rackManager.getRack(tertiaryRS));
      assertTrue(racks.size() >= 2);
    }
  }

  private Triple<Map<RegionInfo, ServerName>, FavoredNodeAssignmentHelper, List<RegionInfo>>
    secondaryAndTertiaryRSPlacementHelper(int regionCount, Map<String, Integer> rackToServerCount) {
    Map<RegionInfo, ServerName> primaryRSMap = new HashMap<RegionInfo, ServerName>();
    List<ServerName> servers = getServersFromRack(rackToServerCount);
    FavoredNodeAssignmentHelper helper = new FavoredNodeAssignmentHelper(servers, rackManager);
    Map<ServerName, List<RegionInfo>> assignmentMap = new HashMap<ServerName, List<RegionInfo>>();
    helper.initialize();
    // create regions
    List<RegionInfo> regions = new ArrayList<>(regionCount);
    for (int i = 0; i < regionCount; i++) {
      regions.add(RegionInfoBuilder.newBuilder(TableName.valueOf(name.getMethodName()))
        .setStartKey(Bytes.toBytes(i)).setEndKey(Bytes.toBytes(i + 1)).build());
    }
    // place the regions
    helper.placePrimaryRSAsRoundRobin(assignmentMap, primaryRSMap, regions);
    return new Triple<>(primaryRSMap, helper, regions);
  }
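  // Builds a three-rack cluster of the given sizes, places 'regionCount' regions on primary
  // region servers via round-robin, and then checks that the per-rack region counts line up
  // with the rack sizes (see checkNumRegions).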
  private void primaryRSPlacement(int regionCount, Map<RegionInfo, ServerName> primaryRSMap,
    int firstRackSize, int secondRackSize, int thirdRackSize) {
    Map<String, Integer> rackToServerCount = new HashMap<>();
    rackToServerCount.put("rack1", firstRackSize);
    rackToServerCount.put("rack2", secondRackSize);
    rackToServerCount.put("rack3", thirdRackSize);
    List<ServerName> servers = getServersFromRack(rackToServerCount);
    FavoredNodeAssignmentHelper helper = new FavoredNodeAssignmentHelper(servers, rackManager);
    helper.initialize();

    assertTrue(helper.canPlaceFavoredNodes());

    Map<ServerName, List<RegionInfo>> assignmentMap = new HashMap<>();
    if (primaryRSMap == null) {
      primaryRSMap = new HashMap<>();
    }
    // create some regions
    List<RegionInfo> regions = new ArrayList<>(regionCount);
    for (int i = 0; i < regionCount; i++) {
      regions.add(RegionInfoBuilder.newBuilder(TableName.valueOf("foobar"))
        .setStartKey(Bytes.toBytes(i)).setEndKey(Bytes.toBytes(i + 1)).build());
    }
    // place those regions in primary RSs
    helper.placePrimaryRSAsRoundRobin(assignmentMap, primaryRSMap, regions);

    // we should have all the regions nicely spread across the racks
    int regionsOnRack1 = 0;
    int regionsOnRack2 = 0;
    int regionsOnRack3 = 0;
    for (RegionInfo region : regions) {
      if (rackManager.getRack(primaryRSMap.get(region)).equals("rack1")) {
        regionsOnRack1++;
      } else if (rackManager.getRack(primaryRSMap.get(region)).equals("rack2")) {
        regionsOnRack2++;
      } else if (rackManager.getRack(primaryRSMap.get(region)).equals("rack3")) {
        regionsOnRack3++;
      }
    }
    // Verify that the regions got placed in the way we expect (documented in
    // FavoredNodeAssignmentHelper#placePrimaryRSAsRoundRobin)
    checkNumRegions(firstRackSize, secondRackSize, thirdRackSize, regionsOnRack1, regionsOnRack2,
      regionsOnRack3);
  }

  private void checkNumRegions(int firstRackSize, int secondRackSize, int thirdRackSize,
    int regionsOnRack1, int regionsOnRack2, int regionsOnRack3) {
    // The regions should be distributed proportionately to the rack sizes.
    // Verify the ordering was as expected by inserting the rack sizes and region counts
    // into sorted maps: the keys are the rack sizes and the region counts, and the values
    // are their relative positions, which must match up rack by rack.
    NavigableMap<Integer, Integer> rackMap = new TreeMap<>();
    rackMap.put(firstRackSize, 1);
    rackMap.put(secondRackSize, 2);
    rackMap.put(thirdRackSize, 3);
    NavigableMap<Integer, Integer> regionMap = new TreeMap<>();
    regionMap.put(regionsOnRack1, 1);
    regionMap.put(regionsOnRack2, 2);
    regionMap.put(regionsOnRack3, 3);
    assertEquals(
      printProportions(firstRackSize, secondRackSize, thirdRackSize, regionsOnRack1, regionsOnRack2,
        regionsOnRack3),
      rackMap.get(firstRackSize).intValue(), regionMap.get(regionsOnRack1).intValue());
    assertEquals(
      printProportions(firstRackSize, secondRackSize, thirdRackSize, regionsOnRack1, regionsOnRack2,
        regionsOnRack3),
      rackMap.get(secondRackSize).intValue(), regionMap.get(regionsOnRack2).intValue());
    assertEquals(
      printProportions(firstRackSize, secondRackSize, thirdRackSize, regionsOnRack1, regionsOnRack2,
        regionsOnRack3),
      rackMap.get(thirdRackSize).intValue(), regionMap.get(regionsOnRack3).intValue());
  }

  private String printProportions(int firstRackSize, int secondRackSize, int thirdRackSize,
    int regionsOnRack1, int regionsOnRack2, int regionsOnRack3) {
    return "The rack sizes " + firstRackSize + " " + secondRackSize + " " + thirdRackSize + " "
      + regionsOnRack1 + " " + regionsOnRack2 + " " + regionsOnRack3;
  }

  @Test
  public void testConstrainedPlacement() throws Exception {
    List<ServerName> servers = Lists.newArrayList();
    servers.add(ServerName.valueOf("foo" + 1 + ":1234", -1));
    servers.add(ServerName.valueOf("foo" + 2 + ":1234", -1));
    servers.add(ServerName.valueOf("foo" + 15 + ":1234", -1));
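    // Per the rack mapping in getRack(), foo1 and foo2 are on rack1 and foo15 is on rack2, so
    // placement here is constrained to just three servers spread over two racks.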
    FavoredNodeAssignmentHelper helper = new FavoredNodeAssignmentHelper(servers, rackManager);
    helper.initialize();
    assertTrue(helper.canPlaceFavoredNodes());

    List<RegionInfo> regions = new ArrayList<>(20);
    for (int i = 0; i < 20; i++) {
      regions.add(RegionInfoBuilder.newBuilder(TableName.valueOf(name.getMethodName()))
        .setStartKey(Bytes.toBytes(i)).setEndKey(Bytes.toBytes(i + 1)).build());
    }
    Map<ServerName, List<RegionInfo>> assignmentMap = new HashMap<ServerName, List<RegionInfo>>();
    Map<RegionInfo, ServerName> primaryRSMap = new HashMap<RegionInfo, ServerName>();
    helper.placePrimaryRSAsRoundRobin(assignmentMap, primaryRSMap, regions);
    assertTrue(primaryRSMap.size() == regions.size());
    Map<RegionInfo, ServerName[]> secondaryAndTertiary =
      helper.placeSecondaryAndTertiaryRS(primaryRSMap);
    assertEquals(regions.size(), secondaryAndTertiary.size());
  }

  @Test
  public void testGetOneRandomRack() throws IOException {

    Map<String, Integer> rackToServerCount = new HashMap<>();
    Set<String> rackList = Sets.newHashSet("rack1", "rack2", "rack3");
    for (String rack : rackList) {
      rackToServerCount.put(rack, 2);
    }
    List<ServerName> servers = getServersFromRack(rackToServerCount);

    FavoredNodeAssignmentHelper helper = new FavoredNodeAssignmentHelper(servers, rackManager);
    helper.initialize();
    assertTrue(helper.canPlaceFavoredNodes());

    // Check we don't get a bad rack on any number of attempts
    for (int attempts = 0; attempts < MAX_ATTEMPTS; attempts++) {
      assertTrue(rackList.contains(helper.getOneRandomRack(Sets.newHashSet())));
    }

    // Check skipRack multiple times when an invalid rack is specified
    Set<String> skipRacks = Sets.newHashSet("rack");
    for (int attempts = 0; attempts < MAX_ATTEMPTS; attempts++) {
      assertTrue(rackList.contains(helper.getOneRandomRack(skipRacks)));
    }

    // Check skipRack multiple times when a valid rack is specified
    skipRacks = Sets.newHashSet("rack1");
    Set<String> validRacks = Sets.newHashSet("rack2", "rack3");
    for (int attempts = 0; attempts < MAX_ATTEMPTS; attempts++) {
      assertTrue(validRacks.contains(helper.getOneRandomRack(skipRacks)));
    }
  }

  @Test
  public void testGetRandomServerSingleRack() throws IOException {

    Map<String, Integer> rackToServerCount = new HashMap<>();
    final String rack = "rack1";
    rackToServerCount.put(rack, 4);
    List<ServerName> servers = getServersFromRack(rackToServerCount);

    FavoredNodeAssignmentHelper helper = new FavoredNodeAssignmentHelper(servers, rackManager);
    helper.initialize();
    assertTrue(helper.canPlaceFavoredNodes());

    // Check we don't get a bad node on any number of attempts
    for (int attempts = 0; attempts < MAX_ATTEMPTS; attempts++) {
      ServerName sn = helper.getOneRandomServer(rack, Sets.newHashSet());
      assertTrue("Server:" + sn + " does not belong to list: " + servers, servers.contains(sn));
    }

    // Check skipServers multiple times when an invalid server is specified
    Set<ServerName> skipServers =
      Sets.newHashSet(ServerName.valueOf("invalidnode:1234", ServerName.NON_STARTCODE));
    for (int attempts = 0; attempts < MAX_ATTEMPTS; attempts++) {
      ServerName sn = helper.getOneRandomServer(rack, skipServers);
      assertTrue("Server:" + sn + " does not belong to list: " + servers, servers.contains(sn));
    }

    // Check skipServers multiple times when valid servers are specified
    ServerName skipSN = ServerName.valueOf("foo1:1234", ServerName.NON_STARTCODE);
    skipServers = Sets.newHashSet(skipSN);
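    // The skip entry uses NON_STARTCODE while the server pool carries real startcodes, so the
    // assertion below compares addresses to verify that skipping works by host:port rather
    // than by exact ServerName equality.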
    for (int attempts = 0; attempts < MAX_ATTEMPTS; attempts++) {
      ServerName sn = helper.getOneRandomServer(rack, skipServers);
      assertNotEquals("Skip server should not be selected ", skipSN.getAddress(), sn.getAddress());
      assertTrue("Server:" + sn + " does not belong to list: " + servers, servers.contains(sn));
    }
  }

  @Test
  public void testGetRandomServerMultiRack() throws IOException {
    Map<String, Integer> rackToServerCount = new HashMap<>();
    Set<String> rackList = Sets.newHashSet("rack1", "rack2", "rack3");
    for (String rack : rackList) {
      rackToServerCount.put(rack, 4);
    }
    List<ServerName> servers = getServersFromRack(rackToServerCount);

    FavoredNodeAssignmentHelper helper = new FavoredNodeAssignmentHelper(servers, rackManager);
    helper.initialize();
    assertTrue(helper.canPlaceFavoredNodes());

    // Check we don't get a bad node on any number of attempts
    for (int attempts = 0; attempts < MAX_ATTEMPTS; attempts++) {
      for (String rack : rackList) {
        ServerName sn = helper.getOneRandomServer(rack, Sets.newHashSet());
        assertTrue("Server:" + sn + " does not belong to rack servers: " + rackToServers.get(rack),
          rackToServers.get(rack).contains(sn));
      }
    }

    // Check skipServers multiple times when an invalid server is specified
    Set<ServerName> skipServers =
      Sets.newHashSet(ServerName.valueOf("invalidnode:1234", ServerName.NON_STARTCODE));
    for (int attempts = 0; attempts < MAX_ATTEMPTS; attempts++) {
      for (String rack : rackList) {
        ServerName sn = helper.getOneRandomServer(rack, skipServers);
        assertTrue("Server:" + sn + " does not belong to rack servers: " + rackToServers.get(rack),
          rackToServers.get(rack).contains(sn));
      }
    }

    // Check skipServers multiple times when valid servers are specified
    ServerName skipSN1 = ServerName.valueOf("foo1:1234", ServerName.NON_STARTCODE);
    ServerName skipSN2 = ServerName.valueOf("foo10:1234", ServerName.NON_STARTCODE);
    ServerName skipSN3 = ServerName.valueOf("foo20:1234", ServerName.NON_STARTCODE);
    skipServers = Sets.newHashSet(skipSN1, skipSN2, skipSN3);
    for (int attempts = 0; attempts < MAX_ATTEMPTS; attempts++) {
      for (String rack : rackList) {
        ServerName sn = helper.getOneRandomServer(rack, skipServers);
        assertFalse("Skip server should not be selected ", skipServers.contains(sn));
        assertTrue("Server:" + sn + " does not belong to rack servers: " + rackToServers.get(rack),
          rackToServers.get(rack).contains(sn));
      }
    }
  }

  @Test
  public void testGetFavoredNodes() throws IOException {
    Map<String, Integer> rackToServerCount = new HashMap<>();
    Set<String> rackList = Sets.newHashSet("rack1", "rack2", "rack3");
    for (String rack : rackList) {
      rackToServerCount.put(rack, 4);
    }
    List<ServerName> servers = getServersFromRack(rackToServerCount);

    FavoredNodeAssignmentHelper helper = new FavoredNodeAssignmentHelper(servers, rackManager);
    helper.initialize();
    assertTrue(helper.canPlaceFavoredNodes());

    RegionInfo region = RegionInfoBuilder.newBuilder(TableName.valueOf(name.getMethodName()))
      .setStartKey(HConstants.EMPTY_START_ROW).setEndKey(HConstants.EMPTY_END_ROW).build();

    for (int maxattempts = 0; maxattempts < MAX_ATTEMPTS; maxattempts++) {
      List<ServerName> fn = helper.generateFavoredNodes(region);
      checkDuplicateFN(fn);
      checkFNRacks(fn);
    }
  }

  @Test
  public void testGenMissingFavoredNodeOneRack() throws IOException {
    Map<String, Integer> rackToServerCount = new HashMap<>();
    final String rack = "rack1";
    rackToServerCount.put(rack, 6);
    List<ServerName> servers = getServersFromRack(rackToServerCount);

    FavoredNodeAssignmentHelper helper = new FavoredNodeAssignmentHelper(servers, rackManager);
    helper.initialize();
    assertTrue(helper.canPlaceFavoredNodes());

    ServerName snRack1SN1 = ServerName.valueOf("foo1:1234", ServerName.NON_STARTCODE);
    ServerName snRack1SN2 = ServerName.valueOf("foo2:1234", ServerName.NON_STARTCODE);
    ServerName snRack1SN3 = ServerName.valueOf("foo3:1234", ServerName.NON_STARTCODE);

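    // Start from two favored nodes and ask the helper to generate the missing third one; it
    // must not duplicate the existing two, and in the second loop it must also avoid the
    // explicitly excluded server.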
    List<ServerName> fn = Lists.newArrayList(snRack1SN1, snRack1SN2);
    for (int attempts = 0; attempts < MAX_ATTEMPTS; attempts++) {
      checkDuplicateFN(fn, helper.generateMissingFavoredNode(fn));
    }

    fn = Lists.newArrayList(snRack1SN1, snRack1SN2);
    List<ServerName> skipServers = Lists.newArrayList(snRack1SN3);
    for (int attempts = 0; attempts < MAX_ATTEMPTS; attempts++) {
      ServerName genSN = helper.generateMissingFavoredNode(fn, skipServers);
      checkDuplicateFN(fn, genSN);
      assertNotEquals("Generated FN should not match excluded one", snRack1SN3, genSN);
    }
  }

  @Test
  public void testGenMissingFavoredNodeMultiRack() throws IOException {

    ServerName snRack1SN1 = ServerName.valueOf("foo1:1234", ServerName.NON_STARTCODE);
    ServerName snRack1SN2 = ServerName.valueOf("foo2:1234", ServerName.NON_STARTCODE);
    ServerName snRack2SN1 = ServerName.valueOf("foo10:1234", ServerName.NON_STARTCODE);
    ServerName snRack2SN2 = ServerName.valueOf("foo11:1234", ServerName.NON_STARTCODE);

    Map<String, Integer> rackToServerCount = new HashMap<>();
    Set<String> rackList = Sets.newHashSet("rack1", "rack2");
    for (String rack : rackList) {
      rackToServerCount.put(rack, 4);
    }
    List<ServerName> servers = getServersFromRack(rackToServerCount);

    FavoredNodeAssignmentHelper helper = new FavoredNodeAssignmentHelper(servers, rackManager);
    helper.initialize();
    assertTrue(helper.canPlaceFavoredNodes());

    List<ServerName> fn = Lists.newArrayList(snRack1SN1, snRack1SN2);
    for (int attempts = 0; attempts < MAX_ATTEMPTS; attempts++) {
      ServerName genSN = helper.generateMissingFavoredNode(fn);
      checkDuplicateFN(fn, genSN);
      checkFNRacks(fn, genSN);
    }

    fn = Lists.newArrayList(snRack1SN1, snRack2SN1);
    for (int attempts = 0; attempts < MAX_ATTEMPTS; attempts++) {
      ServerName genSN = helper.generateMissingFavoredNode(fn);
      checkDuplicateFN(fn, genSN);
      checkFNRacks(fn, genSN);
    }

    fn = Lists.newArrayList(snRack1SN1, snRack2SN1);
    List<ServerName> skipServers = Lists.newArrayList(snRack2SN2);
    for (int attempts = 0; attempts < MAX_ATTEMPTS; attempts++) {
      ServerName genSN = helper.generateMissingFavoredNode(fn, skipServers);
      checkDuplicateFN(fn, genSN);
      checkFNRacks(fn, genSN);
      assertNotEquals("Generated FN should not match excluded one", snRack2SN2, genSN);
    }
  }

  private void checkDuplicateFN(List<ServerName> fnList, ServerName genFN) {
    Set<ServerName> favoredNodes = Sets.newHashSet(fnList);
    assertNotNull("Generated FN can't be null", genFN);
    favoredNodes.add(genFN);
    assertEquals("Did not find expected number of favored nodes",
      FavoredNodeAssignmentHelper.FAVORED_NODES_NUM, favoredNodes.size());
  }

  private void checkDuplicateFN(List<ServerName> fnList) {
    Set<ServerName> favoredNodes = Sets.newHashSet(fnList);
    assertEquals("Did not find expected number of favored nodes",
      FavoredNodeAssignmentHelper.FAVORED_NODES_NUM, favoredNodes.size());
  }

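  // The two checkFNRacks variants below assert that the favored nodes (plus the freshly
  // generated one, where applicable) span at least two racks.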
  private void checkFNRacks(List<ServerName> fnList, ServerName genFN) {
    Set<ServerName> favoredNodes = Sets.newHashSet(fnList);
    favoredNodes.add(genFN);
    Set<String> racks = Sets.newHashSet();
    for (ServerName sn : favoredNodes) {
      racks.add(rackManager.getRack(sn));
    }
    assertTrue("FN should be spread across at least 2 racks", racks.size() >= 2);
  }

  private void checkFNRacks(List<ServerName> fnList) {
    Set<ServerName> favoredNodes = Sets.newHashSet(fnList);
    Set<String> racks = Sets.newHashSet();
    for (ServerName sn : favoredNodes) {
      racks.add(rackManager.getRack(sn));
    }
    assertTrue("FN should be spread across at least 2 racks", racks.size() >= 2);
  }
}