/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.master.procedure;

import java.io.IOException;
import java.lang.Thread.UncaughtExceptionHandler;
import java.util.List;
import java.util.Set;
import java.util.concurrent.TimeUnit;
import javax.security.sasl.SaslException;
import org.apache.hadoop.hbase.CallQueueTooBigException;
import org.apache.hadoop.hbase.DoNotRetryIOException;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.client.AsyncRegionServerAdmin;
import org.apache.hadoop.hbase.client.RegionInfo;
import org.apache.hadoop.hbase.exceptions.ConnectionClosedException;
import org.apache.hadoop.hbase.ipc.RpcConnectionConstants;
import org.apache.hadoop.hbase.ipc.ServerNotRunningYetException;
import org.apache.hadoop.hbase.master.MasterServices;
import org.apache.hadoop.hbase.master.ServerListener;
import org.apache.hadoop.hbase.master.ServerManager;
import org.apache.hadoop.hbase.procedure2.ProcedureExecutor;
import org.apache.hadoop.hbase.procedure2.RemoteProcedureDispatcher;
import org.apache.hadoop.hbase.regionserver.RegionServerStoppedException;
import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
import org.apache.hadoop.hbase.util.FutureUtils;
import org.apache.hadoop.ipc.RemoteException;
import org.apache.yetus.audience.InterfaceAudience;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import org.apache.hbase.thirdparty.com.google.common.collect.ArrayListMultimap;
import org.apache.hbase.thirdparty.com.google.protobuf.ByteString;

import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
import org.apache.hadoop.hbase.shaded.protobuf.RequestConverter;
import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ExecuteProceduresRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ExecuteProceduresResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.OpenRegionRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.RemoteProcedureRequest;

/**
 * A remote procedure dispatcher for regionservers.
 */
@InterfaceAudience.Private
public class RSProcedureDispatcher extends RemoteProcedureDispatcher<MasterProcedureEnv, ServerName>
  implements ServerListener {
  private static final Logger LOG = LoggerFactory.getLogger(RSProcedureDispatcher.class);

  public static final String RS_RPC_STARTUP_WAIT_TIME_CONF_KEY =
    "hbase.regionserver.rpc.startup.waittime";
  private static final int DEFAULT_RS_RPC_STARTUP_WAIT_TIME = 60000;
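  // For illustration only (value assumed): the startup wait can be raised in hbase-site.xml, e.g.
  //   <property>
  //     <name>hbase.regionserver.rpc.startup.waittime</name>
  //     <value>120000</value>
  //   </property>
  // to wait up to two minutes for a regionserver that is still starting up.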

  protected final MasterServices master;
  private final long rsStartupWaitTime;
  private MasterProcedureEnv procedureEnv;

  public RSProcedureDispatcher(final MasterServices master) {
    super(master.getConfiguration());

    this.master = master;
    this.rsStartupWaitTime = master.getConfiguration().getLong(RS_RPC_STARTUP_WAIT_TIME_CONF_KEY,
      DEFAULT_RS_RPC_STARTUP_WAIT_TIME);
  }

  @Override
  protected UncaughtExceptionHandler getUncaughtExceptionHandler() {
    return (t, e) -> LOG
      .error("Unexpected error caught, this may cause the procedure to hang forever", e);
  }

  @Override
  public boolean start() {
    if (!super.start()) {
      return false;
    }
    setTimeoutExecutorUncaughtExceptionHandler(this::abort);
    if (master.isStopped()) {
      LOG.debug("Stopped");
      return false;
    }
    // Around startup, if startup fails, some of the values below may be reset to null, so guard
    // against NPEs here.
    ServerManager sm = master.getServerManager();
    if (sm == null) {
      LOG.debug("ServerManager is null");
      return false;
    }
    sm.registerListener(this);
    ProcedureExecutor<MasterProcedureEnv> pe = master.getMasterProcedureExecutor();
    if (pe == null) {
      LOG.debug("ProcedureExecutor is null");
      return false;
    }
    this.procedureEnv = pe.getEnvironment();
    if (this.procedureEnv == null) {
      LOG.debug("ProcedureEnv is null; stopping={}", master.isStopping());
      return false;
    }
    try {
      for (ServerName serverName : sm.getOnlineServersList()) {
        addNode(serverName);
      }
    } catch (Exception e) {
      LOG.info("Failed to start", e);
      return false;
    }
    return true;
  }

  private void abort(Thread t, Throwable e) {
    LOG.error("Caught error", e);
    if (!master.isStopped() && !master.isStopping() && !master.isAborted()) {
      master.abort("Aborting master", e);
    }
  }

  @Override
  public boolean stop() {
    if (!super.stop()) {
      return false;
    }

    master.getServerManager().unregisterListener(this);
    return true;
  }

  @Override
  protected void remoteDispatch(final ServerName serverName,
    final Set<RemoteProcedure> remoteProcedures) {
    if (!master.getServerManager().isServerOnline(serverName)) {
      // fail fast
      submitTask(new DeadRSRemoteCall(serverName, remoteProcedures));
    } else {
      submitTask(new ExecuteProceduresRemoteCall(serverName, remoteProcedures));
    }
  }

  @Override
  protected void abortPendingOperations(final ServerName serverName,
    final Set<RemoteProcedure> operations) {
    // TODO: Replace with a ServerNotOnlineException()
    final IOException e = new DoNotRetryIOException("server not online " + serverName);
    for (RemoteProcedure proc : operations) {
      proc.remoteCallFailed(procedureEnv, serverName, e);
    }
  }

  @Override
  public void serverAdded(final ServerName serverName) {
    addNode(serverName);
  }

  @Override
  public void serverRemoved(final ServerName serverName) {
    removeNode(serverName);
  }

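  /**
   * Callback used by
   * {@link #splitAndResolveOperation(ServerName, Set, RemoteProcedureResolver)} to hand back the
   * remote operations once they have been grouped by type.
   */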
  private interface RemoteProcedureResolver {
    void dispatchOpenRequests(MasterProcedureEnv env, List<RegionOpenOperation> operations);

    void dispatchCloseRequests(MasterProcedureEnv env, List<RegionCloseOperation> operations);

    void dispatchServerOperations(MasterProcedureEnv env, List<ServerOperation> operations);
  }

  /**
   * Fetches {@link org.apache.hadoop.hbase.procedure2.RemoteProcedureDispatcher.RemoteOperation}s
   * from the given {@code remoteProcedures} and groups them by the class of the returned
   * operation. The {@code resolver} is then used to dispatch the grouped
   * {@link RegionOpenOperation}s, {@link RegionCloseOperation}s and {@link ServerOperation}s.
   * @param serverName RegionServer to which the remote operations are sent
   * @param operations Remote procedures which are dispatched to the given server
   * @param resolver   Used to dispatch remote procedures to the given server.
   */
  public void splitAndResolveOperation(ServerName serverName, Set<RemoteProcedure> operations,
    RemoteProcedureResolver resolver) {
    MasterProcedureEnv env = master.getMasterProcedureExecutor().getEnvironment();
    ArrayListMultimap<Class<?>, RemoteOperation> reqsByType =
      buildAndGroupRequestByType(env, serverName, operations);

    List<RegionOpenOperation> openOps = fetchType(reqsByType, RegionOpenOperation.class);
    if (!openOps.isEmpty()) {
      resolver.dispatchOpenRequests(env, openOps);
    }

    List<RegionCloseOperation> closeOps = fetchType(reqsByType, RegionCloseOperation.class);
    if (!closeOps.isEmpty()) {
      resolver.dispatchCloseRequests(env, closeOps);
    }

    List<ServerOperation> refreshOps = fetchType(reqsByType, ServerOperation.class);
    if (!refreshOps.isEmpty()) {
      resolver.dispatchServerOperations(env, refreshOps);
    }

    if (!reqsByType.isEmpty()) {
      LOG.warn("unknown request type in the queue: " + reqsByType);
    }
  }

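  /**
   * Remote call for a regionserver that is already known to be dead: fails every procedure in the
   * batch immediately instead of attempting an RPC to a server we know is offline.
   */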
  private class DeadRSRemoteCall extends ExecuteProceduresRemoteCall {

    public DeadRSRemoteCall(ServerName serverName, Set<RemoteProcedure> remoteProcedures) {
      super(serverName, remoteProcedures);
    }

    @Override
    public void run() {
      remoteCallFailed(procedureEnv,
        new RegionServerStoppedException("Server " + getServerName() + " is not online"));
    }
  }

  // ==========================================================================
  //  Compatibility calls
  // ==========================================================================
  protected class ExecuteProceduresRemoteCall implements RemoteProcedureResolver, Runnable {

    private final ServerName serverName;

    private final Set<RemoteProcedure> remoteProcedures;

    private int numberOfAttemptsSoFar = 0;
    private long maxWaitTime = -1;

    private final long rsRpcRetryInterval;
    private static final String RS_RPC_RETRY_INTERVAL_CONF_KEY =
      "hbase.regionserver.rpc.retry.interval";
    private static final int DEFAULT_RS_RPC_RETRY_INTERVAL = 100;

    /**
     * Config key for the retry limit while executing remote regionserver procedures. The limit
     * applies only to specific errors which could otherwise leave the remote procedure stuck for
     * several minutes.
     */
    private static final String RS_REMOTE_PROC_FAIL_FAST_LIMIT =
      "hbase.master.rs.remote.proc.fail.fast.limit";
    /**
     * The default retry limit. Waiting for more than {@value} attempts is not going to help much
     * for genuine connectivity errors, so fail fast after {@value} attempts.
     */
    private static final int DEFAULT_RS_REMOTE_PROC_RETRY_LIMIT = 5;
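    // With the defaults above: retries are paced from a 100 ms base interval, and for the
    // fail-fast error types (SASL errors, closed connections, CallQueueTooBigException) the
    // server is expired after 5 attempts; see scheduleForRetry below for the exact rules.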

    private final int failFastRetryLimit;

    private ExecuteProceduresRequest.Builder request = null;

    public ExecuteProceduresRemoteCall(final ServerName serverName,
      final Set<RemoteProcedure> remoteProcedures) {
      this.serverName = serverName;
      this.remoteProcedures = remoteProcedures;
      this.rsRpcRetryInterval = master.getConfiguration().getLong(RS_RPC_RETRY_INTERVAL_CONF_KEY,
        DEFAULT_RS_RPC_RETRY_INTERVAL);
      this.failFastRetryLimit = master.getConfiguration().getInt(RS_REMOTE_PROC_FAIL_FAST_LIMIT,
        DEFAULT_RS_REMOTE_PROC_RETRY_LIMIT);
    }

    private AsyncRegionServerAdmin getRsAdmin() throws IOException {
      return master.getAsyncClusterConnection().getRegionServerAdmin(serverName);
    }

    protected final ServerName getServerName() {
      return serverName;
    }

    private boolean scheduleForRetry(IOException e) {
      LOG.debug("Request to {} failed, try={}", serverName, numberOfAttemptsSoFar, e);
      // Should we wait a little before retrying? If the server is starting up, then yes.
      if (e instanceof ServerNotRunningYetException) {
        long remainingTime = getMaxWaitTime() - EnvironmentEdgeManager.currentTime();
        if (remainingTime > 0) {
          LOG.warn("Waiting a little before retrying {}, try={}, can wait up to {}ms", serverName,
            numberOfAttemptsSoFar, remainingTime);
          numberOfAttemptsSoFar++;
          // Retry every rsRpcRetryInterval millis up to the maximum wait time.
          submitTask(this, rsRpcRetryInterval, TimeUnit.MILLISECONDS);
          return true;
        }
        LOG.warn("{} is still throwing ServerNotRunningYetException after {}ms; trying another"
          + " server", serverName, rsStartupWaitTime);
        return false;
      }
      if (e instanceof DoNotRetryIOException) {
        LOG.warn("{} tells us DoNotRetry due to {}, try={}, give up", serverName, e.toString(),
          numberOfAttemptsSoFar);
        return false;
      }
      // This category of exceptions is thrown in the rpc framework, where we can be sure that the
      // call has not been executed yet, so it is safe to mark it as failed. Especially for opening
      // a region, we'd better choose another region server.
      // Notice that it is only safe to quit if this is the first time we send a request to the
      // region server. The region server may have accepted our request the first time, with a
      // network error then preventing us from receiving the response; if we hit this category of
      // exceptions on a later attempt, it is obviously not safe to quit here, otherwise it may
      // lead to a double assign...
      if (numberOfAttemptsSoFar == 0 && unableToConnectToServer(e)) {
        return false;
      }

      // Check whether the number of attempts has crossed the retry limit and the error type
      // allows fail-fast.
      if (numberOfAttemptsSoFar >= failFastRetryLimit - 1 && isErrorTypeFailFast(e)) {
        LOG
          .warn("Number of retries {} exceeded limit {} for the given error type. Scheduling server"
            + " crash for {}", numberOfAttemptsSoFar + 1, failFastRetryLimit, serverName, e);
        // Expiring the server will schedule an SCP and also reject any regionserver report the
        // regionserver may still manage to send to the master. The master rejects the report by
        // throwing YouAreDeadException, which eventually results in the regionserver aborting.
        // This also removes "serverName" from the ServerManager's onlineServers map.
        master.getServerManager().expireServer(serverName);
        return false;
      }
      // Always retry for other exception types if the region server is not dead yet.
      if (!master.getServerManager().isServerOnline(serverName)) {
        LOG.warn("Request to {} failed due to {}, try={} and the server is not online, give up",
          serverName, e.toString(), numberOfAttemptsSoFar);
        return false;
      }
      if (e instanceof RegionServerStoppedException) {
        // A better way would be to return true here to let the upper layer quit, and then schedule
        // a background task to check whether the region server is dead. If it is dead, call
        // remoteCallFailed to tell the upper layer. Retrying here does not lead to an incorrect
        // result, but it wastes some resources.
        LOG.warn("{} is aborted or stopped, for safety we still need to"
          + " wait until it is fully dead, try={}", serverName, numberOfAttemptsSoFar);
      } else {
        LOG.warn("Request to {} failed due to {}, try={}, retrying... , request params: {}",
          serverName, e.toString(), numberOfAttemptsSoFar, request.build());
      }
      numberOfAttemptsSoFar++;
      // Add some backoff as the attempts rise, otherwise a stuck condition will fill the logs
      // with failed attempts. None of our backoff classes -- RetryCounter or ClientBackoffPolicy
      // -- fit here nicely, so just do something simple: back off by rsRpcRetryInterval millis *
      // retry^2 on each try, up to a max of 10 seconds (we don't want to back off too much in
      // case the situation changes).
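      // With the default 100 ms interval, retry n is delayed min(100 * n^2, 10000) ms: 100 ms,
      // 400 ms, 900 ms, 1.6 s, 2.5 s, ... hitting the 10 s cap from the 10th retry onwards.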
      submitTask(this,
        Math.min(
          rsRpcRetryInterval * ((long) this.numberOfAttemptsSoFar * this.numberOfAttemptsSoFar),
          10 * 1000),
        TimeUnit.MILLISECONDS);
      return true;
    }

    /**
     * The category of exceptions where we can be sure that the request has not yet been received
     * or processed by the target regionserver, and hence it is safe to choose a different
     * regionserver as the target.
     * @param e IOException thrown by the underlying rpc framework.
     * @return true if the exception belongs to the category where the regionserver has not yet
     *         received the request.
     */
    private boolean unableToConnectToServer(IOException e) {
      if (e instanceof CallQueueTooBigException) {
        LOG.warn("request to {} failed due to {}, try={}, this is usually because the"
          + " server is overloaded, give up", serverName, e, numberOfAttemptsSoFar);
        return true;
      }
      if (isSaslError(e)) {
        LOG.warn("{} is not reachable; give up after first attempt", serverName, e);
        return true;
      }
      return false;
    }

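    /**
     * Returns true if the error is a SASL failure. Walks the cause chain, unwrapping any
     * {@link RemoteException} on the way, and also treats a message containing
     * {@link RpcConnectionConstants#RELOGIN_IS_IN_PROGRESS} as a SASL error.
     */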
    private boolean isSaslError(IOException e) {
      Throwable cause = e;
      while (true) {
        if (cause instanceof IOException) {
          IOException unwrappedCause = unwrapException((IOException) cause);
          if (
            unwrappedCause instanceof SaslException
              || (unwrappedCause.getMessage() != null && unwrappedCause.getMessage()
                .contains(RpcConnectionConstants.RELOGIN_IS_IN_PROGRESS))
          ) {
            return true;
          }
        }
        cause = cause.getCause();
        if (cause == null) {
          return false;
        }
      }
    }

    /**
     * Returns true if the error or its cause is of type ConnectionClosedException.
     * @param e IOException thrown by the underlying rpc framework.
     * @return True if the error or its cause is of type ConnectionClosedException.
     */
    private boolean isConnectionClosedError(IOException e) {
      if (e instanceof ConnectionClosedException) {
        return true;
      }
      Throwable cause = e;
      while (true) {
        if (cause instanceof IOException) {
          IOException unwrappedCause = unwrapException((IOException) cause);
          if (unwrappedCause instanceof ConnectionClosedException) {
            return true;
          }
        }
        cause = cause.getCause();
        if (cause == null) {
          return false;
        }
      }
    }

    /**
     * Returns true if the error type can allow fail-fast.
     * @param e IOException thrown by the underlying rpc framework.
     * @return True if the error type can allow fail-fast.
     */
    private boolean isErrorTypeFailFast(IOException e) {
      return e instanceof CallQueueTooBigException || isSaslError(e) || isConnectionClosedError(e);
    }

    private long getMaxWaitTime() {
      if (this.maxWaitTime < 0) {
        // Set the deadline on first use: now plus the configured regionserver startup wait time.
        this.maxWaitTime = EnvironmentEdgeManager.currentTime() + rsStartupWaitTime;
      }
      return this.maxWaitTime;
    }

    private IOException unwrapException(IOException e) {
      if (e instanceof RemoteException) {
        e = ((RemoteException) e).unwrapRemoteException();
      }
      return e;
    }

    @Override
    public void run() {
      request = ExecuteProceduresRequest.newBuilder();
      if (LOG.isTraceEnabled()) {
        LOG.trace("Building request with operations count=" + remoteProcedures.size());
      }
      splitAndResolveOperation(getServerName(), remoteProcedures, this);

      try {
        sendRequest(getServerName(), request.build());
      } catch (IOException e) {
        e = unwrapException(e);
        // TODO: In the future some operation may want to bail out early.
        // TODO: How many times should we retry (use numberOfAttemptsSoFar)
        if (!scheduleForRetry(e)) {
          remoteCallFailed(procedureEnv, e);
        }
      }
    }

    @Override
    public void dispatchOpenRequests(final MasterProcedureEnv env,
      final List<RegionOpenOperation> operations) {
      request.addOpenRegion(buildOpenRegionRequest(env, getServerName(), operations));
    }

    @Override
    public void dispatchCloseRequests(final MasterProcedureEnv env,
      final List<RegionCloseOperation> operations) {
      for (RegionCloseOperation op : operations) {
        request.addCloseRegion(op.buildCloseRegionRequest(getServerName()));
      }
    }

    @Override
    public void dispatchServerOperations(MasterProcedureEnv env, List<ServerOperation> operations) {
      operations.stream().map(ServerOperation::buildRequest).forEachOrdered(request::addProc);
    }

    // will be overridden in test.
    protected ExecuteProceduresResponse sendRequest(final ServerName serverName,
      final ExecuteProceduresRequest request) throws IOException {
      return FutureUtils.get(getRsAdmin().executeProcedures(request));
    }

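    /**
     * Notifies every remote procedure in this batch that the call to the target regionserver
     * failed with the given exception.
     */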
    protected final void remoteCallFailed(final MasterProcedureEnv env, final IOException e) {
      for (RemoteProcedure proc : remoteProcedures) {
        proc.remoteCallFailed(env, getServerName(), e);
      }
    }
  }

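  /**
   * Builds a single {@link OpenRegionRequest} for the given operations, stamping it with the
   * master system time, the target server's start code, and one
   * {@link OpenRegionRequest.RegionOpenInfo} entry per region to open.
   */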
  private static OpenRegionRequest buildOpenRegionRequest(final MasterProcedureEnv env,
    final ServerName serverName, final List<RegionOpenOperation> operations) {
    final OpenRegionRequest.Builder builder = OpenRegionRequest.newBuilder();
    builder.setServerStartCode(serverName.getStartCode());
    operations.stream().map(RemoteOperation::getInitiatingMasterActiveTime).findAny()
      .ifPresent(builder::setInitiatingMasterActiveTime);
    builder.setMasterSystemTime(EnvironmentEdgeManager.currentTime());
    for (RegionOpenOperation op : operations) {
      builder.addOpenInfo(op.buildRegionOpenInfoRequest(env));
    }
    return builder.build();
  }

  // ==========================================================================
  //  RPC Messages
  //  - ServerOperation: refreshConfig, grant, revoke, ... (TODO)
  //  - RegionOperation: open, close, flush, snapshot, ...
  // ==========================================================================

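  /**
   * Wraps a procedure payload to be executed on the regionserver: the procedure id assigned by
   * the master, the class of the procedure to run on the regionserver, and its serialized data.
   */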
  public static final class ServerOperation extends RemoteOperation {

    private final long procId;

    private final Class<?> rsProcClass;

    private final byte[] rsProcData;

    public ServerOperation(RemoteProcedure remoteProcedure, long procId, Class<?> rsProcClass,
      byte[] rsProcData, long initiatingMasterActiveTime) {
      super(remoteProcedure, initiatingMasterActiveTime);
      this.procId = procId;
      this.rsProcClass = rsProcClass;
      this.rsProcData = rsProcData;
    }

    public RemoteProcedureRequest buildRequest() {
      return RemoteProcedureRequest.newBuilder().setProcId(procId)
        .setProcClass(rsProcClass.getName()).setProcData(ByteString.copyFrom(rsProcData))
        .setInitiatingMasterActiveTime(getInitiatingMasterActiveTime()).build();
    }
  }

  public static abstract class RegionOperation extends RemoteOperation {
    protected final RegionInfo regionInfo;
    protected final long procId;

    protected RegionOperation(RemoteProcedure remoteProcedure, RegionInfo regionInfo, long procId,
      long initiatingMasterActiveTime) {
      super(remoteProcedure, initiatingMasterActiveTime);
      this.regionInfo = regionInfo;
      this.procId = procId;
    }
  }

  public static class RegionOpenOperation extends RegionOperation {

    public RegionOpenOperation(RemoteProcedure remoteProcedure, RegionInfo regionInfo, long procId,
      long initiatingMasterActiveTime) {
      super(remoteProcedure, regionInfo, procId, initiatingMasterActiveTime);
    }

    public OpenRegionRequest.RegionOpenInfo
      buildRegionOpenInfoRequest(final MasterProcedureEnv env) {
      return RequestConverter.buildRegionOpenInfo(regionInfo,
        env.getAssignmentManager().getFavoredNodes(regionInfo), procId);
    }
  }

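  /**
   * A close-region operation. {@code destinationServer} carries the server the region is moving
   * to, if any, and {@code evictCache} controls whether the region's cached blocks are evicted on
   * close.
   */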
  public static class RegionCloseOperation extends RegionOperation {
    private final ServerName destinationServer;
    private final boolean evictCache;

    public RegionCloseOperation(RemoteProcedure remoteProcedure, RegionInfo regionInfo, long procId,
      ServerName destinationServer, boolean evictCache, long initiatingMasterActiveTime) {
      super(remoteProcedure, regionInfo, procId, initiatingMasterActiveTime);
      this.destinationServer = destinationServer;
      this.evictCache = evictCache;
    }

    public ServerName getDestinationServer() {
      return destinationServer;
    }

    public CloseRegionRequest buildCloseRegionRequest(final ServerName serverName) {
      return ProtobufUtil.buildCloseRegionRequest(serverName, regionInfo.getRegionName(),
        getDestinationServer(), procId, evictCache, getInitiatingMasterActiveTime());
    }
  }
}