001/*
002 * Licensed to the Apache Software Foundation (ASF) under one
003 * or more contributor license agreements.  See the NOTICE file
004 * distributed with this work for additional information
005 * regarding copyright ownership.  The ASF licenses this file
006 * to you under the Apache License, Version 2.0 (the
007 * "License"); you may not use this file except in compliance
008 * with the License.  You may obtain a copy of the License at
009 *
010 *     http://www.apache.org/licenses/LICENSE-2.0
011 *
012 * Unless required by applicable law or agreed to in writing, software
013 * distributed under the License is distributed on an "AS IS" BASIS,
014 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
015 * See the License for the specific language governing permissions and
016 * limitations under the License.
017 */
018package org.apache.hadoop.hbase.favored;
019
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertNotEquals;
import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertNull;
import static org.junit.Assert.assertTrue;
import static org.mockito.ArgumentMatchers.any;
import static org.mockito.Mockito.when;

import java.io.IOException;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.SortedMap;
import java.util.TreeMap;
import org.apache.hadoop.hbase.HBaseClassTestRule;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.RegionInfo;
import org.apache.hadoop.hbase.client.RegionInfoBuilder;
import org.apache.hadoop.hbase.master.RackManager;
import org.apache.hadoop.hbase.testclassification.MasterTests;
import org.apache.hadoop.hbase.testclassification.MediumTests;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
import org.apache.hadoop.hbase.util.Triple;
import org.junit.BeforeClass;
import org.junit.ClassRule;
import org.junit.Rule;
import org.junit.Test;
import org.junit.experimental.categories.Category;
import org.junit.rules.TestName;
import org.mockito.Mockito;

import org.apache.hbase.thirdparty.com.google.common.collect.Lists;
import org.apache.hbase.thirdparty.com.google.common.collect.Sets;
058
059@Category({ MasterTests.class, MediumTests.class })
060public class TestFavoredNodeAssignmentHelper {
061
062  @ClassRule
063  public static final HBaseClassTestRule CLASS_RULE =
064    HBaseClassTestRule.forClass(TestFavoredNodeAssignmentHelper.class);
065
066  private static List<ServerName> servers = new ArrayList<>();
067  private static Map<String, List<ServerName>> rackToServers = new HashMap<>();
068  private static RackManager rackManager = Mockito.mock(RackManager.class);
069
070  // Some tests have randomness, so we run them multiple times
071  private static final int MAX_ATTEMPTS = 100;
072
073  @Rule
074  public TestName name = new TestName();
075
076  private static String getRack(int index) {
077    if (index < 10) {
078      return "rack1";
079    } else if (index < 20) {
080      return "rack2";
081    } else if (index < 30) {
082      return "rack3";
083    } else {
084      return RackManager.UNKNOWN_RACK;
085    }
086  }
087
088  @BeforeClass
089  public static void setupBeforeClass() throws Exception {
090    // Set up some server -> rack mappings
091    // Have three racks in the cluster with 10 hosts each.
092    when(rackManager.getRack(any(ServerName.class))).then(invocation -> {
093      ServerName sn = invocation.getArgument(0, ServerName.class);
094      try {
095        int i = Integer.parseInt(sn.getHostname().substring("foo".length()));
096        return getRack(i);
097      } catch (NumberFormatException e) {
098        return RackManager.UNKNOWN_RACK;
099      }
100    });
101    for (int i = 0; i < 40; i++) {
102      ServerName server = ServerName.valueOf("foo" + i, 1234, EnvironmentEdgeManager.currentTime());
103      String rack = getRack(i);
104      if (!rack.equals(RackManager.UNKNOWN_RACK)) {
105        rackToServers.computeIfAbsent(rack, k -> new ArrayList<>()).add(server);
106      }
107      servers.add(server);
108    }
109  }
110
111  // The tests decide which racks to work with, and how many machines to
112  // work with from any given rack
113  // Return a random 'count' number of servers from 'rack'
114  private static List<ServerName> getServersFromRack(Map<String, Integer> rackToServerCount) {
115    List<ServerName> chosenServers = new ArrayList<>();
116    for (Map.Entry<String, Integer> entry : rackToServerCount.entrySet()) {
117      List<ServerName> servers = rackToServers.get(entry.getKey());
118      for (int i = 0; i < entry.getValue(); i++) {
119        chosenServers.add(servers.get(i));
120      }
121    }
122    return chosenServers;
123  }
124
125  @Test
126  public void testSmallCluster() {
127    // Test the case where we cannot assign favored nodes (because the number
128    // of nodes in the cluster is too less)
129    Map<String, Integer> rackToServerCount = new HashMap<>();
130    rackToServerCount.put("rack1", 2);
131    List<ServerName> servers = getServersFromRack(rackToServerCount);
132    FavoredNodeAssignmentHelper helper = new FavoredNodeAssignmentHelper(servers, rackManager);
133    helper.initialize();
134    assertFalse(helper.canPlaceFavoredNodes());
135  }
136
137  @Test
138  public void testPlacePrimaryRSAsRoundRobin() {
139    // Test the regular case where there are many servers in different racks
140    // Test once for few regions and once for many regions
141    primaryRSPlacement(6, null, 10, 10, 10);
142    // now create lots of regions and try to place them on the limited number of machines
143    primaryRSPlacement(600, null, 10, 10, 10);
144  }
145
146  @Test
147  public void testRoundRobinAssignmentsWithUnevenSizedRacks() {
148    // In the case of uneven racks, the regions should be distributed
149    // proportionately to the rack sizes
150    primaryRSPlacement(6, null, 10, 10, 10);
151    primaryRSPlacement(600, null, 10, 10, 5);
152    primaryRSPlacement(600, null, 10, 5, 10);
153    primaryRSPlacement(600, null, 5, 10, 10);
154    primaryRSPlacement(500, null, 10, 10, 5);
155    primaryRSPlacement(500, null, 10, 5, 10);
156    primaryRSPlacement(500, null, 5, 10, 10);
157    primaryRSPlacement(500, null, 9, 7, 8);
158    primaryRSPlacement(500, null, 8, 7, 9);
159    primaryRSPlacement(500, null, 7, 9, 8);
160    primaryRSPlacement(459, null, 7, 9, 8);
161  }
162
163  @Test
164  public void testSecondaryAndTertiaryPlacementWithSingleRack() {
165    // Test the case where there is a single rack and we need to choose
166    // Primary/Secondary/Tertiary from a single rack.
167    Map<String, Integer> rackToServerCount = new HashMap<>();
168    rackToServerCount.put("rack1", 10);
169    // have lots of regions to test with
170    Triple<Map<RegionInfo, ServerName>, FavoredNodeAssignmentHelper,
171      List<RegionInfo>> primaryRSMapAndHelper =
172        secondaryAndTertiaryRSPlacementHelper(60000, rackToServerCount);
173    FavoredNodeAssignmentHelper helper = primaryRSMapAndHelper.getSecond();
174    Map<RegionInfo, ServerName> primaryRSMap = primaryRSMapAndHelper.getFirst();
175    List<RegionInfo> regions = primaryRSMapAndHelper.getThird();
176    Map<RegionInfo, ServerName[]> secondaryAndTertiaryMap =
177      helper.placeSecondaryAndTertiaryRS(primaryRSMap);
178    // although we created lots of regions we should have no overlap on the
179    // primary/secondary/tertiary for any given region
180    for (RegionInfo region : regions) {
181      ServerName[] secondaryAndTertiaryServers = secondaryAndTertiaryMap.get(region);
182      assertNotNull(secondaryAndTertiaryServers);
183      assertTrue(primaryRSMap.containsKey(region));
184      assertTrue(!secondaryAndTertiaryServers[0].equals(primaryRSMap.get(region)));
185      assertTrue(!secondaryAndTertiaryServers[1].equals(primaryRSMap.get(region)));
186      assertTrue(!secondaryAndTertiaryServers[0].equals(secondaryAndTertiaryServers[1]));
187    }
188  }
189
190  @Test
191  public void testSecondaryAndTertiaryPlacementWithSingleServer() {
192    // Test the case where we have a single node in the cluster. In this case
193    // the primary can be assigned but the secondary/tertiary would be null
194    Map<String, Integer> rackToServerCount = new HashMap<>();
195    rackToServerCount.put("rack1", 1);
196    Triple<Map<RegionInfo, ServerName>, FavoredNodeAssignmentHelper,
197      List<RegionInfo>> primaryRSMapAndHelper =
198        secondaryAndTertiaryRSPlacementHelper(1, rackToServerCount);
199    FavoredNodeAssignmentHelper helper = primaryRSMapAndHelper.getSecond();
200    Map<RegionInfo, ServerName> primaryRSMap = primaryRSMapAndHelper.getFirst();
201    List<RegionInfo> regions = primaryRSMapAndHelper.getThird();
202
203    Map<RegionInfo, ServerName[]> secondaryAndTertiaryMap =
204      helper.placeSecondaryAndTertiaryRS(primaryRSMap);
205    // no secondary/tertiary placement in case of a single RegionServer
206    assertTrue(secondaryAndTertiaryMap.get(regions.get(0)) == null);
207  }
208
209  @Test
210  public void testSecondaryAndTertiaryPlacementWithMultipleRacks() {
211    // Test the case where we have multiple racks and the region servers
212    // belong to multiple racks
213    Map<String, Integer> rackToServerCount = new HashMap<>();
214    rackToServerCount.put("rack1", 10);
215    rackToServerCount.put("rack2", 10);
216
217    Triple<Map<RegionInfo, ServerName>, FavoredNodeAssignmentHelper,
218      List<RegionInfo>> primaryRSMapAndHelper =
219        secondaryAndTertiaryRSPlacementHelper(60000, rackToServerCount);
220    FavoredNodeAssignmentHelper helper = primaryRSMapAndHelper.getSecond();
221    Map<RegionInfo, ServerName> primaryRSMap = primaryRSMapAndHelper.getFirst();
222
223    assertTrue(primaryRSMap.size() == 60000);
224    Map<RegionInfo, ServerName[]> secondaryAndTertiaryMap =
225      helper.placeSecondaryAndTertiaryRS(primaryRSMap);
226    assertTrue(secondaryAndTertiaryMap.size() == 60000);
227    // for every region, the primary should be on one rack and the secondary/tertiary
228    // on another (we create a lot of regions just to increase probability of failure)
229    for (Map.Entry<RegionInfo, ServerName[]> entry : secondaryAndTertiaryMap.entrySet()) {
230      ServerName[] allServersForRegion = entry.getValue();
231      String primaryRSRack = rackManager.getRack(primaryRSMap.get(entry.getKey()));
232      String secondaryRSRack = rackManager.getRack(allServersForRegion[0]);
233      String tertiaryRSRack = rackManager.getRack(allServersForRegion[1]);
234      Set<String> racks = Sets.newHashSet(primaryRSRack);
235      racks.add(secondaryRSRack);
236      racks.add(tertiaryRSRack);
237      assertTrue(racks.size() >= 2);
238    }
239  }
240
241  @Test
242  public void testSecondaryAndTertiaryPlacementWithLessThanTwoServersInRacks() {
243    // Test the case where we have two racks but with less than two servers in each
244    // We will not have enough machines to select secondary/tertiary
245    Map<String, Integer> rackToServerCount = new HashMap<>();
246    rackToServerCount.put("rack1", 1);
247    rackToServerCount.put("rack2", 1);
248    Triple<Map<RegionInfo, ServerName>, FavoredNodeAssignmentHelper,
249      List<RegionInfo>> primaryRSMapAndHelper =
250        secondaryAndTertiaryRSPlacementHelper(6, rackToServerCount);
251    FavoredNodeAssignmentHelper helper = primaryRSMapAndHelper.getSecond();
252    Map<RegionInfo, ServerName> primaryRSMap = primaryRSMapAndHelper.getFirst();
253    List<RegionInfo> regions = primaryRSMapAndHelper.getThird();
254    assertTrue(primaryRSMap.size() == 6);
255    Map<RegionInfo, ServerName[]> secondaryAndTertiaryMap =
256      helper.placeSecondaryAndTertiaryRS(primaryRSMap);
257    for (RegionInfo region : regions) {
258      // not enough secondary/tertiary room to place the regions
259      assertTrue(secondaryAndTertiaryMap.get(region) == null);
260    }
261  }
262
263  @Test
264  public void testSecondaryAndTertiaryPlacementWithMoreThanOneServerInPrimaryRack() {
265    // Test the case where there is only one server in one rack and another rack
266    // has more servers. We try to choose secondary/tertiary on different
267    // racks than what the primary is on. But if the other rack doesn't have
268    // enough nodes to have both secondary/tertiary RSs, the tertiary is placed
269    // on the same rack as the primary server is on
270    Map<String, Integer> rackToServerCount = new HashMap<>();
271    rackToServerCount.put("rack1", 2);
272    rackToServerCount.put("rack2", 1);
273    Triple<Map<RegionInfo, ServerName>, FavoredNodeAssignmentHelper,
274      List<RegionInfo>> primaryRSMapAndHelper =
275        secondaryAndTertiaryRSPlacementHelper(6, rackToServerCount);
276    FavoredNodeAssignmentHelper helper = primaryRSMapAndHelper.getSecond();
277    Map<RegionInfo, ServerName> primaryRSMap = primaryRSMapAndHelper.getFirst();
278    List<RegionInfo> regions = primaryRSMapAndHelper.getThird();
279    assertTrue(primaryRSMap.size() == 6);
280    Map<RegionInfo, ServerName[]> secondaryAndTertiaryMap =
281      helper.placeSecondaryAndTertiaryRS(primaryRSMap);
282    assertTrue(secondaryAndTertiaryMap.size() == regions.size());
283    for (RegionInfo region : regions) {
284      ServerName s = primaryRSMap.get(region);
285      ServerName secondaryRS = secondaryAndTertiaryMap.get(region)[0];
286      ServerName tertiaryRS = secondaryAndTertiaryMap.get(region)[1];
287      Set<String> racks = Sets.newHashSet(rackManager.getRack(s));
288      racks.add(rackManager.getRack(secondaryRS));
289      racks.add(rackManager.getRack(tertiaryRS));
290      assertTrue(racks.size() >= 2);
291    }
292  }
293
294  private Triple<Map<RegionInfo, ServerName>, FavoredNodeAssignmentHelper, List<RegionInfo>>
295    secondaryAndTertiaryRSPlacementHelper(int regionCount, Map<String, Integer> rackToServerCount) {
296    Map<RegionInfo, ServerName> primaryRSMap = new HashMap<RegionInfo, ServerName>();
297    List<ServerName> servers = getServersFromRack(rackToServerCount);
298    FavoredNodeAssignmentHelper helper = new FavoredNodeAssignmentHelper(servers, rackManager);
299    Map<ServerName, List<RegionInfo>> assignmentMap = new HashMap<ServerName, List<RegionInfo>>();
300    helper.initialize();
301    // create regions
302    List<RegionInfo> regions = new ArrayList<>(regionCount);
303    for (int i = 0; i < regionCount; i++) {
304      regions.add(RegionInfoBuilder.newBuilder(TableName.valueOf(name.getMethodName()))
305        .setStartKey(Bytes.toBytes(i)).setEndKey(Bytes.toBytes(i + 1)).build());
306    }
307    // place the regions
308    helper.placePrimaryRSAsRoundRobin(assignmentMap, primaryRSMap, regions);
309    return new Triple<>(primaryRSMap, helper, regions);
310  }
311
312  private void primaryRSPlacement(int regionCount, Map<RegionInfo, ServerName> primaryRSMap,
313    int firstRackSize, int secondRackSize, int thirdRackSize) {
314    Map<String, Integer> rackToServerCount = new HashMap<>();
315    rackToServerCount.put("rack1", firstRackSize);
316    rackToServerCount.put("rack2", secondRackSize);
317    rackToServerCount.put("rack3", thirdRackSize);
318    List<ServerName> servers = getServersFromRack(rackToServerCount);
319    FavoredNodeAssignmentHelper helper = new FavoredNodeAssignmentHelper(servers, rackManager);
320    helper.initialize();
321
322    assertTrue(helper.canPlaceFavoredNodes());
323
324    Map<ServerName, List<RegionInfo>> assignmentMap = new HashMap<>();
325    if (primaryRSMap == null) primaryRSMap = new HashMap<>();
326    // create some regions
327    List<RegionInfo> regions = new ArrayList<>(regionCount);
328    for (int i = 0; i < regionCount; i++) {
329      regions.add(RegionInfoBuilder.newBuilder(TableName.valueOf("foobar"))
330        .setStartKey(Bytes.toBytes(i)).setEndKey(Bytes.toBytes(i + 1)).build());
331    }
332    // place those regions in primary RSs
333    helper.placePrimaryRSAsRoundRobin(assignmentMap, primaryRSMap, regions);
334
335    // we should have all the regions nicely spread across the racks
336    int regionsOnRack1 = 0;
337    int regionsOnRack2 = 0;
338    int regionsOnRack3 = 0;
339    for (RegionInfo region : regions) {
340      if (rackManager.getRack(primaryRSMap.get(region)).equals("rack1")) {
341        regionsOnRack1++;
342      } else if (rackManager.getRack(primaryRSMap.get(region)).equals("rack2")) {
343        regionsOnRack2++;
344      } else if (rackManager.getRack(primaryRSMap.get(region)).equals("rack3")) {
345        regionsOnRack3++;
346      }
347    }
348    // Verify that the regions got placed in the way we expect (documented in
349    // FavoredNodeAssignmentHelper#placePrimaryRSAsRoundRobin)
350    checkNumRegions(regionCount, firstRackSize, secondRackSize, thirdRackSize, regionsOnRack1,
351      regionsOnRack2, regionsOnRack3, assignmentMap);
352  }
353
354  private void checkNumRegions(int regionCount, int firstRackSize, int secondRackSize,
355    int thirdRackSize, int regionsOnRack1, int regionsOnRack2, int regionsOnRack3,
356    Map<ServerName, List<RegionInfo>> assignmentMap) {
357    // The regions should be distributed proportionately to the racksizes
358    // Verify the ordering was as expected by inserting the racks and regions
359    // in sorted maps. The keys being the racksize and numregions; values are
360    // the relative positions of the racksizes and numregions respectively
361    SortedMap<Integer, Integer> rackMap = new TreeMap<>();
362    rackMap.put(firstRackSize, 1);
363    rackMap.put(secondRackSize, 2);
364    rackMap.put(thirdRackSize, 3);
365    SortedMap<Integer, Integer> regionMap = new TreeMap<>();
366    regionMap.put(regionsOnRack1, 1);
367    regionMap.put(regionsOnRack2, 2);
368    regionMap.put(regionsOnRack3, 3);
369    assertEquals(printProportions(firstRackSize, secondRackSize, thirdRackSize, regionsOnRack1,
370      regionsOnRack2, regionsOnRack3), rackMap.get(firstRackSize), regionMap.get(regionsOnRack1));
371    assertEquals(printProportions(firstRackSize, secondRackSize, thirdRackSize, regionsOnRack1,
372      regionsOnRack2, regionsOnRack3), rackMap.get(secondRackSize), regionMap.get(regionsOnRack2));
373    assertEquals(printProportions(firstRackSize, secondRackSize, thirdRackSize, regionsOnRack1,
374      regionsOnRack2, regionsOnRack3), rackMap.get(thirdRackSize), regionMap.get(regionsOnRack3));
375  }
376
377  private String printProportions(int firstRackSize, int secondRackSize, int thirdRackSize,
378    int regionsOnRack1, int regionsOnRack2, int regionsOnRack3) {
379    return "The rack sizes " + firstRackSize + " " + secondRackSize + " " + thirdRackSize + " "
380      + regionsOnRack1 + " " + regionsOnRack2 + " " + regionsOnRack3;
381  }
382
383  @Test
384  public void testConstrainedPlacement() throws Exception {
385    List<ServerName> servers = Lists.newArrayList();
386    servers.add(ServerName.valueOf("foo" + 1 + ":1234", -1));
387    servers.add(ServerName.valueOf("foo" + 2 + ":1234", -1));
388    servers.add(ServerName.valueOf("foo" + 15 + ":1234", -1));
389    FavoredNodeAssignmentHelper helper = new FavoredNodeAssignmentHelper(servers, rackManager);
390    helper.initialize();
391    assertTrue(helper.canPlaceFavoredNodes());
392
393    List<RegionInfo> regions = new ArrayList<>(20);
394    for (int i = 0; i < 20; i++) {
395      regions.add(RegionInfoBuilder.newBuilder(TableName.valueOf(name.getMethodName()))
396        .setStartKey(Bytes.toBytes(i)).setEndKey(Bytes.toBytes(i + 1)).build());
397    }
398    Map<ServerName, List<RegionInfo>> assignmentMap = new HashMap<ServerName, List<RegionInfo>>();
399    Map<RegionInfo, ServerName> primaryRSMap = new HashMap<RegionInfo, ServerName>();
400    helper.placePrimaryRSAsRoundRobin(assignmentMap, primaryRSMap, regions);
401    assertTrue(primaryRSMap.size() == regions.size());
402    Map<RegionInfo, ServerName[]> secondaryAndTertiary =
403      helper.placeSecondaryAndTertiaryRS(primaryRSMap);
404    assertEquals(regions.size(), secondaryAndTertiary.size());
405  }
406
407  @Test
408  public void testGetOneRandomRack() throws IOException {
409
410    Map<String, Integer> rackToServerCount = new HashMap<>();
411    Set<String> rackList = Sets.newHashSet("rack1", "rack2", "rack3");
412    for (String rack : rackList) {
413      rackToServerCount.put(rack, 2);
414    }
415    List<ServerName> servers = getServersFromRack(rackToServerCount);
416
417    FavoredNodeAssignmentHelper helper = new FavoredNodeAssignmentHelper(servers, rackManager);
418    helper.initialize();
419    assertTrue(helper.canPlaceFavoredNodes());
420
421    // Check we don't get a bad rack on any number of attempts
422    for (int attempts = 0; attempts < MAX_ATTEMPTS; attempts++) {
423      assertTrue(rackList.contains(helper.getOneRandomRack(Sets.newHashSet())));
424    }
425
426    // Check skipRack multiple times when an invalid rack is specified
427    Set<String> skipRacks = Sets.newHashSet("rack");
428    for (int attempts = 0; attempts < MAX_ATTEMPTS; attempts++) {
429      assertTrue(rackList.contains(helper.getOneRandomRack(skipRacks)));
430    }
431
432    // Check skipRack multiple times when an valid rack is specified
433    skipRacks = Sets.newHashSet("rack1");
434    Set<String> validRacks = Sets.newHashSet("rack2", "rack3");
435    for (int attempts = 0; attempts < MAX_ATTEMPTS; attempts++) {
436      assertTrue(validRacks.contains(helper.getOneRandomRack(skipRacks)));
437    }
438  }
439
440  @Test
441  public void testGetRandomServerSingleRack() throws IOException {
442
443    Map<String, Integer> rackToServerCount = new HashMap<>();
444    final String rack = "rack1";
445    rackToServerCount.put(rack, 4);
446    List<ServerName> servers = getServersFromRack(rackToServerCount);
447
448    FavoredNodeAssignmentHelper helper = new FavoredNodeAssignmentHelper(servers, rackManager);
449    helper.initialize();
450    assertTrue(helper.canPlaceFavoredNodes());
451
452    // Check we don't get a bad node on any number of attempts
453    for (int attempts = 0; attempts < MAX_ATTEMPTS; attempts++) {
454      ServerName sn = helper.getOneRandomServer(rack, Sets.newHashSet());
455      assertTrue("Server:" + sn + " does not belong to list: " + servers, servers.contains(sn));
456    }
457
458    // Check skipServers multiple times when an invalid server is specified
459    Set<ServerName> skipServers =
460      Sets.newHashSet(ServerName.valueOf("invalidnode:1234", ServerName.NON_STARTCODE));
461    for (int attempts = 0; attempts < MAX_ATTEMPTS; attempts++) {
462      ServerName sn = helper.getOneRandomServer(rack, skipServers);
463      assertTrue("Server:" + sn + " does not belong to list: " + servers, servers.contains(sn));
464    }
465
466    // Check skipRack multiple times when an valid servers are specified
467    ServerName skipSN = ServerName.valueOf("foo1:1234", ServerName.NON_STARTCODE);
468    skipServers = Sets.newHashSet(skipSN);
469    for (int attempts = 0; attempts < MAX_ATTEMPTS; attempts++) {
470      ServerName sn = helper.getOneRandomServer(rack, skipServers);
471      assertNotEquals("Skip server should not be selected ", skipSN.getAddress(), sn.getAddress());
472      assertTrue("Server:" + sn + " does not belong to list: " + servers, servers.contains(sn));
473    }
474  }
475
476  @Test
477  public void testGetRandomServerMultiRack() throws IOException {
478    Map<String, Integer> rackToServerCount = new HashMap<>();
479    Set<String> rackList = Sets.newHashSet("rack1", "rack2", "rack3");
480    for (String rack : rackList) {
481      rackToServerCount.put(rack, 4);
482    }
483    List<ServerName> servers = getServersFromRack(rackToServerCount);
484
485    FavoredNodeAssignmentHelper helper = new FavoredNodeAssignmentHelper(servers, rackManager);
486    helper.initialize();
487    assertTrue(helper.canPlaceFavoredNodes());
488
489    // Check we don't get a bad node on any number of attempts
490    for (int attempts = 0; attempts < MAX_ATTEMPTS; attempts++) {
491      for (String rack : rackList) {
492        ServerName sn = helper.getOneRandomServer(rack, Sets.newHashSet());
493        assertTrue("Server:" + sn + " does not belong to rack servers: " + rackToServers.get(rack),
494          rackToServers.get(rack).contains(sn));
495      }
496    }
497
498    // Check skipServers multiple times when an invalid server is specified
499    Set<ServerName> skipServers =
500      Sets.newHashSet(ServerName.valueOf("invalidnode:1234", ServerName.NON_STARTCODE));
501    for (int attempts = 0; attempts < MAX_ATTEMPTS; attempts++) {
502      for (String rack : rackList) {
503        ServerName sn = helper.getOneRandomServer(rack, skipServers);
504        assertTrue("Server:" + sn + " does not belong to rack servers: " + rackToServers.get(rack),
505          rackToServers.get(rack).contains(sn));
506      }
507    }
508
509    // Check skipRack multiple times when an valid servers are specified
510    ServerName skipSN1 = ServerName.valueOf("foo1:1234", ServerName.NON_STARTCODE);
511    ServerName skipSN2 = ServerName.valueOf("foo10:1234", ServerName.NON_STARTCODE);
512    ServerName skipSN3 = ServerName.valueOf("foo20:1234", ServerName.NON_STARTCODE);
513    skipServers = Sets.newHashSet(skipSN1, skipSN2, skipSN3);
514    for (int attempts = 0; attempts < MAX_ATTEMPTS; attempts++) {
515      for (String rack : rackList) {
516        ServerName sn = helper.getOneRandomServer(rack, skipServers);
517        assertFalse("Skip server should not be selected ", skipServers.contains(sn));
518        assertTrue("Server:" + sn + " does not belong to rack servers: " + rackToServers.get(rack),
519          rackToServers.get(rack).contains(sn));
520      }
521    }
522  }
523
524  @Test
525  public void testGetFavoredNodes() throws IOException {
526    Map<String, Integer> rackToServerCount = new HashMap<>();
527    Set<String> rackList = Sets.newHashSet("rack1", "rack2", "rack3");
528    for (String rack : rackList) {
529      rackToServerCount.put(rack, 4);
530    }
531    List<ServerName> servers = getServersFromRack(rackToServerCount);
532
533    FavoredNodeAssignmentHelper helper = new FavoredNodeAssignmentHelper(servers, rackManager);
534    helper.initialize();
535    assertTrue(helper.canPlaceFavoredNodes());
536
537    RegionInfo region = RegionInfoBuilder.newBuilder(TableName.valueOf(name.getMethodName()))
538      .setStartKey(HConstants.EMPTY_START_ROW).setEndKey(HConstants.EMPTY_END_ROW).build();
539
540    for (int maxattempts = 0; maxattempts < MAX_ATTEMPTS; maxattempts++) {
541      List<ServerName> fn = helper.generateFavoredNodes(region);
542      checkDuplicateFN(fn);
543      checkFNRacks(fn);
544    }
545  }
546
547  @Test
548  public void testGenMissingFavoredNodeOneRack() throws IOException {
549    Map<String, Integer> rackToServerCount = new HashMap<>();
550    final String rack = "rack1";
551    rackToServerCount.put(rack, 6);
552    List<ServerName> servers = getServersFromRack(rackToServerCount);
553
554    FavoredNodeAssignmentHelper helper = new FavoredNodeAssignmentHelper(servers, rackManager);
555    helper.initialize();
556    assertTrue(helper.canPlaceFavoredNodes());
557
558    ServerName snRack1SN1 = ServerName.valueOf("foo1:1234", ServerName.NON_STARTCODE);
559    ServerName snRack1SN2 = ServerName.valueOf("foo2:1234", ServerName.NON_STARTCODE);
560    ServerName snRack1SN3 = ServerName.valueOf("foo3:1234", ServerName.NON_STARTCODE);
561
562    List<ServerName> fn = Lists.newArrayList(snRack1SN1, snRack1SN2);
563    for (int attempts = 0; attempts < MAX_ATTEMPTS; attempts++) {
564      checkDuplicateFN(fn, helper.generateMissingFavoredNode(fn));
565    }
566
567    fn = Lists.newArrayList(snRack1SN1, snRack1SN2);
568    List<ServerName> skipServers = Lists.newArrayList(snRack1SN3);
569    for (int attempts = 0; attempts < MAX_ATTEMPTS; attempts++) {
570      ServerName genSN = helper.generateMissingFavoredNode(fn, skipServers);
571      checkDuplicateFN(fn, genSN);
572      assertNotEquals("Generated FN should not match excluded one", snRack1SN3, genSN);
573    }
574  }
575
576  @Test
577  public void testGenMissingFavoredNodeMultiRack() throws IOException {
578
579    ServerName snRack1SN1 = ServerName.valueOf("foo1:1234", ServerName.NON_STARTCODE);
580    ServerName snRack1SN2 = ServerName.valueOf("foo2:1234", ServerName.NON_STARTCODE);
581    ServerName snRack2SN1 = ServerName.valueOf("foo10:1234", ServerName.NON_STARTCODE);
582    ServerName snRack2SN2 = ServerName.valueOf("foo11:1234", ServerName.NON_STARTCODE);
583
584    Map<String, Integer> rackToServerCount = new HashMap<>();
585    Set<String> rackList = Sets.newHashSet("rack1", "rack2");
586    for (String rack : rackList) {
587      rackToServerCount.put(rack, 4);
588    }
589    List<ServerName> servers = getServersFromRack(rackToServerCount);
590
591    FavoredNodeAssignmentHelper helper = new FavoredNodeAssignmentHelper(servers, rackManager);
592    helper.initialize();
593    assertTrue(helper.canPlaceFavoredNodes());
594
595    List<ServerName> fn = Lists.newArrayList(snRack1SN1, snRack1SN2);
596    for (int attempts = 0; attempts < MAX_ATTEMPTS; attempts++) {
597      ServerName genSN = helper.generateMissingFavoredNode(fn);
598      checkDuplicateFN(fn, genSN);
599      checkFNRacks(fn, genSN);
600    }
601
602    fn = Lists.newArrayList(snRack1SN1, snRack2SN1);
603    for (int attempts = 0; attempts < MAX_ATTEMPTS; attempts++) {
604      ServerName genSN = helper.generateMissingFavoredNode(fn);
605      checkDuplicateFN(fn, genSN);
606      checkFNRacks(fn, genSN);
607    }
608
609    fn = Lists.newArrayList(snRack1SN1, snRack2SN1);
610    List<ServerName> skipServers = Lists.newArrayList(snRack2SN2);
611    for (int attempts = 0; attempts < MAX_ATTEMPTS; attempts++) {
612      ServerName genSN = helper.generateMissingFavoredNode(fn, skipServers);
613      checkDuplicateFN(fn, genSN);
614      checkFNRacks(fn, genSN);
615      assertNotEquals("Generated FN should not match excluded one", snRack2SN2, genSN);
616    }
617  }
618
619  private void checkDuplicateFN(List<ServerName> fnList, ServerName genFN) {
620    Set<ServerName> favoredNodes = Sets.newHashSet(fnList);
621    assertNotNull("Generated FN can't be null", genFN);
622    favoredNodes.add(genFN);
623    assertEquals("Did not find expected number of favored nodes",
624      FavoredNodeAssignmentHelper.FAVORED_NODES_NUM, favoredNodes.size());
625  }
626
627  private void checkDuplicateFN(List<ServerName> fnList) {
628    Set<ServerName> favoredNodes = Sets.newHashSet(fnList);
629    assertEquals("Did not find expected number of favored nodes",
630      FavoredNodeAssignmentHelper.FAVORED_NODES_NUM, favoredNodes.size());
631  }
632
633  private void checkFNRacks(List<ServerName> fnList, ServerName genFN) {
634    Set<ServerName> favoredNodes = Sets.newHashSet(fnList);
635    favoredNodes.add(genFN);
636    Set<String> racks = Sets.newHashSet();
637    for (ServerName sn : favoredNodes) {
638      racks.add(rackManager.getRack(sn));
639    }
640    assertTrue("FN should be spread atleast across 2 racks", racks.size() >= 2);
641  }
642
643  private void checkFNRacks(List<ServerName> fnList) {
644    Set<ServerName> favoredNodes = Sets.newHashSet(fnList);
645    Set<String> racks = Sets.newHashSet();
646    for (ServerName sn : favoredNodes) {
647      racks.add(rackManager.getRack(sn));
648    }
649    assertTrue("FN should be spread atleast across 2 racks", racks.size() >= 2);
650  }
651}