/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.master.balancer;

import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertNull;
import static org.junit.Assert.assertTrue;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;

import java.util.ArrayList;
import java.util.HashMap;
import java.util.HashSet;
import java.util.LinkedHashMap;
import java.util.LinkedList;
import java.util.List;
import java.util.Map;
import java.util.Map.Entry;
import java.util.Queue;
import java.util.Random;
import java.util.Set;
import java.util.SortedSet;
import java.util.TreeMap;
import java.util.TreeSet;
import java.util.concurrent.ThreadLocalRandom;
import java.util.stream.Collectors;
import java.util.stream.Stream;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.RegionInfo;
import org.apache.hadoop.hbase.client.RegionInfoBuilder;
import org.apache.hadoop.hbase.client.RegionReplicaUtil;
import org.apache.hadoop.hbase.master.MasterServices;
import org.apache.hadoop.hbase.master.RackManager;
import org.apache.hadoop.hbase.master.RegionPlan;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.net.DNSToSwitchMapping;
import org.junit.Assert;
import org.junit.BeforeClass;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

/**
 * Base class for unit tests of load balancers. It provides helper methods to create maps of
 * {@link ServerName} to lists of {@link RegionInfo} and to check lists of region plans.
 */
public class BalancerTestBase {
  private static final Logger LOG = LoggerFactory.getLogger(BalancerTestBase.class);
  static int regionId = 0;
  protected static Configuration conf;
  protected static StochasticLoadBalancer loadBalancer;

  protected static DummyMetricsStochasticBalancer dummyMetricsStochasticBalancer =
    new DummyMetricsStochasticBalancer();

  @BeforeClass
  public static void beforeAllTests() throws Exception {
    conf = HBaseConfiguration.create();
    conf.setClass("hbase.util.ip.to.rack.determiner", MockMapping.class, DNSToSwitchMapping.class);
    conf.setFloat("hbase.master.balancer.stochastic.localityCost", 0);
    loadBalancer = new StochasticLoadBalancer(dummyMetricsStochasticBalancer);
    MasterServices services = mock(MasterServices.class);
    when(services.getConfiguration()).thenReturn(conf);
    loadBalancer.setMasterServices(services);
    loadBalancer.initialize();
  }
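
  // A mock cluster in which every server is empty except the last one, which hosts all 56 regions.
  // It is used as the final clusterStateMocks entry to exercise balancing of heavily skewed load.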
  protected int[] largeCluster = new int[] { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 56 };

  // int[testnum][servernumber] -> numregions
  protected int[][] clusterStateMocks = new int[][] {
    // 1 node
    new int[] { 0 }, new int[] { 1 }, new int[] { 10 },
    // 2 node
    new int[] { 0, 0 }, new int[] { 2, 0 }, new int[] { 2, 1 }, new int[] { 2, 2 },
    new int[] { 2, 3 }, new int[] { 2, 4 }, new int[] { 1, 1 }, new int[] { 0, 1 },
    new int[] { 10, 1 }, new int[] { 514, 1432 }, new int[] { 48, 53 },
    // 3 node
    new int[] { 0, 1, 2 }, new int[] { 1, 2, 3 }, new int[] { 0, 2, 2 }, new int[] { 0, 3, 0 },
    new int[] { 0, 4, 0 }, new int[] { 20, 20, 0 },
    // 4 node
    new int[] { 0, 1, 2, 3 }, new int[] { 4, 0, 0, 0 }, new int[] { 5, 0, 0, 0 },
    new int[] { 6, 6, 0, 0 }, new int[] { 6, 2, 0, 0 }, new int[] { 6, 1, 0, 0 },
    new int[] { 6, 0, 0, 0 }, new int[] { 4, 4, 4, 7 }, new int[] { 4, 4, 4, 8 },
    new int[] { 0, 0, 0, 7 },
    // 5 node
    new int[] { 1, 1, 1, 1, 4 },
    // 6 nodes
    new int[] { 1500, 500, 500, 500, 10, 0 }, new int[] { 1500, 500, 500, 500, 500, 0 },
    // more nodes
    new int[] { 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15 },
    new int[] { 0, 0, 0, 0, 0, 0, 0, 0, 0, 10 }, new int[] { 6, 6, 5, 6, 6, 6, 6, 6, 6, 1 },
    new int[] { 0, 0, 0, 0, 0, 0, 0, 0, 0, 54 }, new int[] { 0, 0, 0, 0, 0, 0, 0, 0, 0, 55 },
    new int[] { 0, 0, 0, 0, 0, 0, 0, 0, 0, 56 }, new int[] { 0, 0, 0, 0, 0, 0, 0, 0, 0, 16 },
    new int[] { 1, 1, 1, 1, 1, 1, 1, 1, 1, 8 }, new int[] { 1, 1, 1, 1, 1, 1, 1, 1, 1, 9 },
    new int[] { 1, 1, 1, 1, 1, 1, 1, 1, 1, 10 }, new int[] { 1, 1, 1, 1, 1, 1, 1, 1, 1, 123 },
    new int[] { 1, 1, 1, 1, 1, 1, 1, 1, 1, 155 }, new int[] { 10, 7, 12, 8, 11, 10, 9, 14 },
    new int[] { 13, 14, 6, 10, 10, 10, 8, 10 }, new int[] { 130, 14, 60, 10, 100, 10, 80, 10 },
    new int[] { 130, 140, 60, 100, 100, 100, 80, 100 }, new int[] { 0, 5, 5, 5, 5 }, largeCluster,

  };

  // int[testnum][servernumber] -> numregions
  protected int[][] clusterStateMocksWithNoSlop = new int[][] {
    // 1 node
    new int[] { 0 }, new int[] { 1 }, new int[] { 10 },
    // 2 node
    new int[] { 0, 0 }, new int[] { 2, 1 }, new int[] { 2, 2 }, new int[] { 2, 3 },
    new int[] { 1, 1 }, new int[] { 80, 120 }, new int[] { 1428, 1432 },
    // more nodes
    new int[] { 100, 90, 120, 90, 110, 100, 90, 120 }, };

  // int[testnum][servernumber] -> numregions
  protected int[][] clusterStateMocksWithSlop = new int[][] {
    // 2 node
    new int[] { 1, 4 }, new int[] { 10, 20 }, new int[] { 80, 123 },
    // more nodes
    new int[] { 100, 100, 100, 100, 100, 100, 100, 100, 100, 200 },
    new int[] { 10, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
      5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
      5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
      5, 5, 5, 5, 5, 5, 5, 5, 5, 5 }, };

  // This class is introduced because IP to rack resolution can be lengthy.
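  // MockMapping is wired up in beforeAllTests() through the "hbase.util.ip.to.rack.determiner"
  // setting, so every host resolves to the same rack without touching real DNS.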
  public static class MockMapping implements DNSToSwitchMapping {
    public MockMapping(Configuration conf) {
    }

    @Override
    public List<String> resolve(List<String> names) {
      return Stream.generate(() -> "rack").limit(names.size()).collect(Collectors.toList());
    }

    // Do not add an @Override annotation here. It might break compilation with earlier Hadoop
    // versions.
    public void reloadCachedMappings() {
    }

    // Do not add an @Override annotation here. It might break compilation with earlier Hadoop
    // versions.
    public void reloadCachedMappings(List<String> arg0) {
    }
  }

  /**
   * Invariant is that all servers have between floor(avg) and ceiling(avg) number of regions. For
   * example, 7 regions across 3 servers means every server must hold either 2 or 3 regions.
   */
  public void assertClusterAsBalanced(List<ServerAndLoad> servers) {
    int numServers = servers.size();
    int numRegions = 0;
    int maxRegions = 0;
    int minRegions = Integer.MAX_VALUE;
    for (ServerAndLoad server : servers) {
      int nr = server.getLoad();
      if (nr > maxRegions) {
        maxRegions = nr;
      }
      if (nr < minRegions) {
        minRegions = nr;
      }
      numRegions += nr;
    }
    if (maxRegions - minRegions < 2) {
      // less than 2 between max and min, can't balance
      return;
    }
    int min = numRegions / numServers;
    int max = numRegions % numServers == 0 ? min : min + 1;

    for (ServerAndLoad server : servers) {
      assertTrue("All servers should have a non-negative load. " + server, server.getLoad() >= 0);
      assertTrue("All servers should have load no more than " + max + ". " + server,
        server.getLoad() <= max);
      assertTrue("All servers should have load no less than " + min + ". " + server,
        server.getLoad() >= min);
    }
  }

  /**
   * Invariant is that every server's region count stays within an acceptable range around the
   * per-server average; the allowed slack grows with the number of tables.
   */
  public boolean assertClusterOverallAsBalanced(List<ServerAndLoad> servers, int tablenum) {
    int numServers = servers.size();
    int numRegions = 0;
    int maxRegions = 0;
    int minRegions = Integer.MAX_VALUE;
    for (ServerAndLoad server : servers) {
      int nr = server.getLoad();
      if (nr > maxRegions) {
        maxRegions = nr;
      }
      if (nr < minRegions) {
        minRegions = nr;
      }
      numRegions += nr;
    }
    if (maxRegions - minRegions < 2) {
      // less than 2 between max and min, can't balance
      return true;
    }
    int min = numRegions / numServers;
    int max = numRegions % numServers == 0 ? min : min + 1;

    for (ServerAndLoad server : servers) {
      // The '5' below is arbitrary slack.
      if (
        server.getLoad() < 0 || server.getLoad() > max + (tablenum / 2 + 5)
          || server.getLoad() < (min - tablenum / 2 - 5)
      ) {
        LOG.warn("server={}, load={}, max={}, tablenum={}, min={}", server.getServerName(),
          server.getLoad(), max, tablenum, min);
        return false;
      }
    }
    return true;
  }

  /**
   * Checks that no two replicas of the same region are hosted on the same host, nor on the same
   * rack when a {@link RackManager} is supplied.
   */
  public void assertRegionReplicaPlacement(Map<ServerName, List<RegionInfo>> serverMap,
    RackManager rackManager) {
    TreeMap<String, Set<RegionInfo>> regionsPerHost = new TreeMap<>();
    TreeMap<String, Set<RegionInfo>> regionsPerRack = new TreeMap<>();

    for (Entry<ServerName, List<RegionInfo>> entry : serverMap.entrySet()) {
      String hostname = entry.getKey().getHostname();
      Set<RegionInfo> infos = regionsPerHost.get(hostname);
      if (infos == null) {
        infos = new HashSet<>();
        regionsPerHost.put(hostname, infos);
      }

      for (RegionInfo info : entry.getValue()) {
        RegionInfo primaryInfo = RegionReplicaUtil.getRegionInfoForDefaultReplica(info);
        if (!infos.add(primaryInfo)) {
          Assert.fail("Two or more region replicas are hosted on the same host after balance");
        }
      }
    }

    if (rackManager == null) {
      return;
    }

    for (Entry<ServerName, List<RegionInfo>> entry : serverMap.entrySet()) {
      String rack = rackManager.getRack(entry.getKey());
      Set<RegionInfo> infos = regionsPerRack.get(rack);
      if (infos == null) {
        infos = new HashSet<>();
        regionsPerRack.put(rack, infos);
      }

      for (RegionInfo info : entry.getValue()) {
        RegionInfo primaryInfo = RegionReplicaUtil.getRegionInfoForDefaultReplica(info);
        if (!infos.add(primaryInfo)) {
          Assert.fail("Two or more region replicas are hosted on the same rack after balance");
        }
      }
    }
  }

  protected String printStats(List<ServerAndLoad> servers) {
    int numServers = servers.size();
    int totalRegions = 0;
    for (ServerAndLoad server : servers) {
      totalRegions += server.getLoad();
    }
    float average = (float) totalRegions / numServers;
    int max = (int) Math.ceil(average);
    int min = (int) Math.floor(average);
    return "[srvr=" + numServers + " rgns=" + totalRegions + " avg=" + average + " max=" + max
      + " min=" + min + "]";
  }

  protected List<ServerAndLoad> convertToList(final Map<ServerName, List<RegionInfo>> servers) {
    List<ServerAndLoad> list = new ArrayList<>(servers.size());
    for (Map.Entry<ServerName, List<RegionInfo>> e : servers.entrySet()) {
      list.add(new ServerAndLoad(e.getKey(), e.getValue().size()));
    }
    return list;
  }
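
  // Renders the sorted per-server loads as a compact string, e.g. "{ server0:3 , server1:2 }",
  // for the log output of the testWithCluster helpers.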
  protected String printMock(List<ServerAndLoad> balancedCluster) {
    SortedSet<ServerAndLoad> sorted = new TreeSet<>(balancedCluster);
    ServerAndLoad[] arr = sorted.toArray(new ServerAndLoad[sorted.size()]);
    StringBuilder sb = new StringBuilder(sorted.size() * 4 + 4);
    sb.append("{ ");
    for (int i = 0; i < arr.length; i++) {
      if (i != 0) {
        sb.append(" , ");
      }
      sb.append(arr[i].getServerName().getHostname());
      sb.append(":");
      sb.append(arr[i].getLoad());
    }
    sb.append(" }");
    return sb.toString();
  }

  /**
   * This assumes the ServerName and RegionInfo instances referenced by the RegionPlans are the
   * same ones held in the map, so strictly the map would not need to be passed in, but doing so
   * keeps the call sites clearer.
   * @return a list of all added {@link ServerAndLoad} values.
   */
  protected List<ServerAndLoad> reconcile(List<ServerAndLoad> list, List<RegionPlan> plans,
    Map<ServerName, List<RegionInfo>> servers) {
    List<ServerAndLoad> result = new ArrayList<>(list.size());

    Map<ServerName, ServerAndLoad> map = new HashMap<>(list.size());
    for (ServerAndLoad sl : list) {
      map.put(sl.getServerName(), sl);
    }
    if (plans != null) {
      for (RegionPlan plan : plans) {
        ServerName source = plan.getSource();

        updateLoad(map, source, -1);
        ServerName destination = plan.getDestination();
        updateLoad(map, destination, +1);

        servers.get(source).remove(plan.getRegionInfo());
        servers.get(destination).add(plan.getRegionInfo());
      }
    }
    result.clear();
    result.addAll(map.values());
    return result;
  }

  protected void updateLoad(final Map<ServerName, ServerAndLoad> map, final ServerName sn,
    final int diff) {
    ServerAndLoad sal = map.get(sn);
    if (sal == null) {
      sal = new ServerAndLoad(sn, 0);
    }
    sal = new ServerAndLoad(sn, sal.getLoad() + diff);
    map.put(sn, sal);
  }

  protected TreeMap<ServerName, List<RegionInfo>> mockClusterServers(int[][] mockCluster) {
    // dimension1: table, dimension2: regions per server
    int numTables = mockCluster.length;
    TreeMap<ServerName, List<RegionInfo>> servers = new TreeMap<>();
    for (int i = 0; i < numTables; i++) {
      TableName tableName = TableName.valueOf("table" + i);
      for (int j = 0; j < mockCluster[i].length; j++) {
        ServerName serverName = ServerName.valueOf("server" + j, 1000, -1);
        int numRegions = mockCluster[i][j];
        List<RegionInfo> regions = createRegions(numRegions, tableName);
        servers.putIfAbsent(serverName, new ArrayList<>());
        servers.get(serverName).addAll(regions);
      }
    }
    return servers;
  }

  protected TreeMap<ServerName, List<RegionInfo>> mockClusterServers(int[] mockCluster) {
    return mockClusterServers(mockCluster, -1);
  }

  protected BalancerClusterState mockCluster(int[] mockCluster) {
    return new BalancerClusterState(mockClusterServers(mockCluster, -1), null, null, null);
  }
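
  // For the mock builders below, a numTables argument greater than zero spreads the regions at
  // random over tables "table0" .. "table<numTables-1>" (see randomRegions); any other value names
  // each region's table after the region's index on its server.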
  protected TreeMap<ServerName, List<RegionInfo>> mockClusterServers(int[] mockCluster,
    int numTables) {
    int numServers = mockCluster.length;
    TreeMap<ServerName, List<RegionInfo>> servers = new TreeMap<>();
    for (int i = 0; i < numServers; i++) {
      int numRegions = mockCluster[i];
      ServerAndLoad sal = randomServer(0);
      List<RegionInfo> regions = randomRegions(numRegions, numTables);
      servers.put(sal.getServerName(), regions);
    }
    return servers;
  }

  protected Map<ServerName, List<RegionInfo>> mockClusterServersUnsorted(int[] mockCluster,
    int numTables) {
    int numServers = mockCluster.length;
    Map<ServerName, List<RegionInfo>> servers = new LinkedHashMap<>();
    for (int i = 0; i < numServers; i++) {
      int numRegions = mockCluster[i];
      ServerAndLoad sal = randomServer(0);
      List<RegionInfo> regions = randomRegions(numRegions, numTables);
      servers.put(sal.getServerName(), regions);
    }
    return servers;
  }

  protected TreeMap<ServerName, List<RegionInfo>> mockUniformClusterServers(int[] mockCluster) {
    int numServers = mockCluster.length;
    TreeMap<ServerName, List<RegionInfo>> servers = new TreeMap<>();
    for (int i = 0; i < numServers; i++) {
      int numRegions = mockCluster[i];
      ServerAndLoad sal = randomServer(0);
      List<RegionInfo> regions = uniformRegions(numRegions);
      servers.put(sal.getServerName(), regions);
    }
    return servers;
  }

  /**
   * Transposes a server-to-regions map into per-table maps of server to regions, making sure every
   * server appears under every table (with an empty list when it hosts none of that table's
   * regions).
   */
  protected HashMap<TableName, TreeMap<ServerName, List<RegionInfo>>>
    mockClusterServersWithTables(Map<ServerName, List<RegionInfo>> clusterServers) {
    HashMap<TableName, TreeMap<ServerName, List<RegionInfo>>> result = new HashMap<>();
    for (Map.Entry<ServerName, List<RegionInfo>> entry : clusterServers.entrySet()) {
      ServerName sal = entry.getKey();
      List<RegionInfo> regions = entry.getValue();
      for (RegionInfo hri : regions) {
        TreeMap<ServerName, List<RegionInfo>> servers = result.get(hri.getTable());
        if (servers == null) {
          servers = new TreeMap<>();
          result.put(hri.getTable(), servers);
        }
        List<RegionInfo> hrilist = servers.get(sal);
        if (hrilist == null) {
          hrilist = new ArrayList<>();
          servers.put(sal, hrilist);
        }
        hrilist.add(hri);
      }
    }
    for (Map.Entry<TableName, TreeMap<ServerName, List<RegionInfo>>> entry : result.entrySet()) {
      for (ServerName srn : clusterServers.keySet()) {
        if (!entry.getValue().containsKey(srn)) {
          entry.getValue().put(srn, new ArrayList<>());
        }
      }
    }
    return result;
  }

  private Queue<RegionInfo> regionQueue = new LinkedList<>();

  protected List<RegionInfo> randomRegions(int numRegions) {
    return randomRegions(numRegions, -1);
  }

  protected List<RegionInfo> createRegions(int numRegions, TableName tableName) {
    List<RegionInfo> regions = new ArrayList<>(numRegions);
    byte[] start = new byte[16];
    Bytes.random(start);
    byte[] end = new byte[16];
    Bytes.random(end);
    for (int i = 0; i < numRegions; i++) {
      Bytes.putInt(start, 0, numRegions << 1);
      Bytes.putInt(end, 0, (numRegions << 1) + 1);
      RegionInfo hri = RegionInfoBuilder.newBuilder(tableName).setStartKey(start).setEndKey(end)
        .setSplit(false).build();
      regions.add(hri);
    }
    return regions;
  }
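
  // randomRegions() below first drains any RegionInfo instances handed back through
  // returnRegions(), so tests can recycle the same regions across successive mock clusters.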
  protected List<RegionInfo> randomRegions(int numRegions, int numTables) {
    List<RegionInfo> regions = new ArrayList<>(numRegions);
    byte[] start = new byte[16];
    Bytes.random(start);
    byte[] end = new byte[16];
    Bytes.random(end);
    for (int i = 0; i < numRegions; i++) {
      if (!regionQueue.isEmpty()) {
        regions.add(regionQueue.poll());
        continue;
      }
      Bytes.putInt(start, 0, numRegions << 1);
      Bytes.putInt(end, 0, (numRegions << 1) + 1);
      TableName tableName = TableName
        .valueOf("table" + (numTables > 0 ? ThreadLocalRandom.current().nextInt(numTables) : i));
      RegionInfo hri = RegionInfoBuilder.newBuilder(tableName).setStartKey(start).setEndKey(end)
        .setSplit(false).setRegionId(regionId++).build();
      regions.add(hri);
    }
    return regions;
  }

  protected List<RegionInfo> uniformRegions(int numRegions) {
    List<RegionInfo> regions = new ArrayList<>(numRegions);
    byte[] start = new byte[16];
    Bytes.random(start);
    byte[] end = new byte[16];
    Bytes.random(end);
    for (int i = 0; i < numRegions; i++) {
      Bytes.putInt(start, 0, numRegions << 1);
      Bytes.putInt(end, 0, (numRegions << 1) + 1);
      TableName tableName = TableName.valueOf("table" + i);
      RegionInfo hri = RegionInfoBuilder.newBuilder(tableName).setStartKey(start).setEndKey(end)
        .setSplit(false).build();
      regions.add(hri);
    }
    return regions;
  }

  protected void returnRegions(List<RegionInfo> regions) {
    regionQueue.addAll(regions);
  }

  private Queue<ServerName> serverQueue = new LinkedList<>();

  protected ServerAndLoad randomServer(final int numRegionsPerServer) {
    if (!this.serverQueue.isEmpty()) {
      ServerName sn = this.serverQueue.poll();
      return new ServerAndLoad(sn, numRegionsPerServer);
    }
    Random rand = ThreadLocalRandom.current();
    String host = "srv" + rand.nextInt(Integer.MAX_VALUE);
    int port = rand.nextInt(60000);
    long startCode = rand.nextLong();
    ServerName sn = ServerName.valueOf(host, port, startCode);
    return new ServerAndLoad(sn, numRegionsPerServer);
  }

  protected List<ServerAndLoad> randomServers(int numServers, int numRegionsPerServer) {
    List<ServerAndLoad> servers = new ArrayList<>(numServers);
    for (int i = 0; i < numServers; i++) {
      servers.add(randomServer(numRegionsPerServer));
    }
    return servers;
  }

  protected void returnServer(ServerName server) {
    serverQueue.add(server);
  }

  protected void returnServers(List<ServerName> servers) {
    this.serverQueue.addAll(servers);
  }

  protected void testWithCluster(int numNodes, int numRegions, int numRegionsPerServer,
    int replication, int numTables, boolean assertFullyBalanced,
    boolean assertFullyBalancedForReplicas) {
    Map<ServerName, List<RegionInfo>> serverMap =
      createServerMap(numNodes, numRegions, numRegionsPerServer, replication, numTables);
    testWithCluster(serverMap, null, assertFullyBalanced, assertFullyBalancedForReplicas);
  }

  protected void testWithClusterWithIteration(int numNodes, int numRegions, int numRegionsPerServer,
    int replication, int numTables, boolean assertFullyBalanced,
    boolean assertFullyBalancedForReplicas) {
    Map<ServerName, List<RegionInfo>> serverMap =
      createServerMap(numNodes, numRegions, numRegionsPerServer, replication, numTables);
    testWithClusterWithIteration(serverMap, null, assertFullyBalanced,
      assertFullyBalancedForReplicas);
  }
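
  // The Map-based overloads below drive loadBalancer.balanceCluster() against the mock state,
  // apply the returned RegionPlans via reconcile(), and then re-check the balance invariants.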
  protected void testWithCluster(Map<ServerName, List<RegionInfo>> serverMap,
    RackManager rackManager, boolean assertFullyBalanced, boolean assertFullyBalancedForReplicas) {
    List<ServerAndLoad> list = convertToList(serverMap);
    LOG.info("Mock Cluster : " + printMock(list) + " " + printStats(list));

    loadBalancer.setRackManager(rackManager);
    // Run the balancer.
    Map<TableName, Map<ServerName, List<RegionInfo>>> loadOfAllTable =
      (Map) mockClusterServersWithTables(serverMap);
    List<RegionPlan> plans = loadBalancer.balanceCluster(loadOfAllTable);
    assertNotNull("Initial cluster balance should produce plans.", plans);

    // Check to see that this actually got to a stable place.
    if (assertFullyBalanced || assertFullyBalancedForReplicas) {
      // Apply the plan to the mock cluster.
      List<ServerAndLoad> balancedCluster = reconcile(list, plans, serverMap);

      // Print out the cluster loads to make debugging easier.
      LOG.info("Mock after Balance : " + printMock(balancedCluster));

      if (assertFullyBalanced) {
        assertClusterAsBalanced(balancedCluster);
        loadOfAllTable = (Map) mockClusterServersWithTables(serverMap);
        List<RegionPlan> secondPlans = loadBalancer.balanceCluster(loadOfAllTable);
        assertNull("Given a requirement to be fully balanced, second attempt at plans should "
          + "produce none.", secondPlans);
      }

      if (assertFullyBalancedForReplicas) {
        assertRegionReplicaPlacement(serverMap, rackManager);
      }
    }
  }

  protected void testWithClusterWithIteration(Map<ServerName, List<RegionInfo>> serverMap,
    RackManager rackManager, boolean assertFullyBalanced, boolean assertFullyBalancedForReplicas) {
    List<ServerAndLoad> list = convertToList(serverMap);
    LOG.info("Mock Cluster : " + printMock(list) + " " + printStats(list));

    loadBalancer.setRackManager(rackManager);
    // Run the balancer.
    Map<TableName, Map<ServerName, List<RegionInfo>>> loadOfAllTable =
      (Map) mockClusterServersWithTables(serverMap);
    List<RegionPlan> plans = loadBalancer.balanceCluster(loadOfAllTable);
    assertNotNull("Initial cluster balance should produce plans.", plans);

    List<ServerAndLoad> balancedCluster = null;
    // Iterate until the balancer stops producing plans; otherwise the test would be killed by the
    // test timeout.
    while (plans != null && (assertFullyBalanced || assertFullyBalancedForReplicas)) {
      // Apply the plan to the mock cluster.
      balancedCluster = reconcile(list, plans, serverMap);

      // Print out the cluster loads to make debugging easier.
      LOG.info("Mock after balance: " + printMock(balancedCluster));

      loadOfAllTable = (Map) mockClusterServersWithTables(serverMap);
      plans = loadBalancer.balanceCluster(loadOfAllTable);
    }

    // Print out the cluster loads to make debugging easier.
    LOG.info("Mock Final balance: " + printMock(balancedCluster));

    if (assertFullyBalanced) {
      assertNull("Given a requirement to be fully balanced, second attempt at plans should "
        + "produce none.", plans);
    }
    if (assertFullyBalancedForReplicas) {
      assertRegionReplicaPlacement(serverMap, rackManager);
    }
  }

  protected Map<ServerName, List<RegionInfo>> createServerMap(int numNodes, int numRegions,
    int numRegionsPerServer, int replication, int numTables) {
    // construct a cluster of numNodes, having a total of numRegions. Each RS will hold
    // numRegionsPerServer many regions except for the last one, which will host all the
    // remaining regions.
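    // e.g. createServerMap(5, 100, 10, 1, -1) (illustrative values) yields four servers with 10
    // regions each and a fifth server holding the remaining 60, with no replicas added.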
    int[] cluster = new int[numNodes];
    for (int i = 0; i < numNodes; i++) {
      cluster[i] = numRegionsPerServer;
    }
    cluster[cluster.length - 1] = numRegions - ((cluster.length - 1) * numRegionsPerServer);
    Map<ServerName, List<RegionInfo>> clusterState = mockClusterServers(cluster, numTables);
    if (replication > 0) {
      // replicate the regions to the same servers
      for (List<RegionInfo> regions : clusterState.values()) {
        int length = regions.size();
        for (int i = 0; i < length; i++) {
          for (int r = 1; r < replication; r++) {
            regions.add(RegionReplicaUtil.getRegionInfoForReplica(regions.get(i), r));
          }
        }
      }
    }

    return clusterState;
  }

}