/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.client;

import static java.util.stream.Collectors.toList;
import static org.apache.hadoop.hbase.HConstants.EMPTY_END_ROW;
import static org.apache.hadoop.hbase.HConstants.EMPTY_START_ROW;
import static org.apache.hadoop.hbase.util.FutureUtils.addListener;

import java.io.IOException;
import java.lang.reflect.UndeclaredThrowableException;
import java.net.UnknownHostException;
import java.util.Arrays;
import java.util.List;
import java.util.Map;
import java.util.Optional;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.ThreadLocalRandom;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicReference;
import java.util.function.Function;
import java.util.function.Predicate;
import java.util.function.Supplier;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.CellComparator;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.PrivateCellUtil;
import org.apache.hadoop.hbase.RegionLocations;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.metrics.ScanMetrics;
import org.apache.hadoop.hbase.ipc.FatalConnectionException;
import org.apache.hadoop.hbase.ipc.HBaseRpcController;
import org.apache.hadoop.hbase.security.User;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.ReflectionUtils;
import org.apache.hadoop.ipc.RemoteException;
import org.apache.hadoop.net.DNS;
import org.apache.yetus.audience.InterfaceAudience;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import org.apache.hbase.thirdparty.com.google.common.base.Preconditions;
import org.apache.hbase.thirdparty.com.google.protobuf.ServiceException;
import org.apache.hbase.thirdparty.io.netty.util.Timer;

import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
import org.apache.hadoop.hbase.shaded.protobuf.ResponseConverter;
import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos;
import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.ScanResponse;

/**
 * Utility used by client connections.
 */
@InterfaceAudience.Private
public final class ConnectionUtils {

  private static final Logger LOG = LoggerFactory.getLogger(ConnectionUtils.class);

  private ConnectionUtils() {
  }

  /**
   * Calculate pause time. Built on {@link HConstants#RETRY_BACKOFF}.
   * @param pause the base pause time
   * @param tries the number of tries so far
   * @return how long to wait after <code>tries</code> retries
   */
  public static long getPauseTime(final long pause, final int tries) {
    int ntries = tries;
    if (ntries >= HConstants.RETRY_BACKOFF.length) {
      ntries = HConstants.RETRY_BACKOFF.length - 1;
    }
    if (ntries < 0) {
      ntries = 0;
    }

    long normalPause = pause * HConstants.RETRY_BACKOFF[ntries];
    // Add up to 1% random jitter so concurrent retries do not all fire at the same time.
    long jitter = (long) (normalPause * ThreadLocalRandom.current().nextFloat() * 0.01f);
    return normalPause + jitter;
  }
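  // Illustrative example (a sketch, assuming the stock HConstants.RETRY_BACKOFF table, whose
  // fourth entry is 5): getPauseTime(100, 3) returns 100 * 5 = 500 ms plus up to 1% jitter.
  // An out-of-range try count such as getPauseTime(100, 100) is clamped to the last entry.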

  /**
   * Inject a nonce generator for testing.
   * @param conn The connection for which to replace the generator.
   * @param cnm  Replaces the nonce generator used, for testing.
   * @return old nonce generator.
   */
  public static NonceGenerator injectNonceGeneratorForTesting(ClusterConnection conn,
    NonceGenerator cnm) {
    return ConnectionImplementation.injectNonceGeneratorForTesting(conn, cnm);
  }

  /**
   * Changes the configuration to set the number of retries needed when using Connection
   * internally, e.g. for updating catalog tables, etc. Call this method before we create any
   * Connections.
   * @param c   The Configuration instance to set the retries into.
   * @param sn  The server name, used only to prefix the log message.
   * @param log Used to log what we set in here.
   */
  public static void setServerSideHConnectionRetriesConfig(final Configuration c, final String sn,
    final Logger log) {
    // TODO: Fix this. Not all connections from server side should have 10 times the retries.
    int hcRetries = c.getInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER,
      HConstants.DEFAULT_HBASE_CLIENT_RETRIES_NUMBER);
    // Go big. Multiply by the server-side multiplier (default 10). If we can't get to meta after
    // this many retries then something is seriously wrong.
    int serversideMultiplier = c.getInt(HConstants.HBASE_CLIENT_SERVERSIDE_RETRIES_MULTIPLIER,
      HConstants.DEFAULT_HBASE_CLIENT_SERVERSIDE_RETRIES_MULTIPLIER);
    int retries = hcRetries * serversideMultiplier;
    c.setInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER, retries);
    log.info(sn + " server-side Connection retries=" + retries);
  }

  /**
   * Set up the connection class so that it will not depend on the master being online. Used for
   * testing.
   * @param conf configuration to set
   */
  public static void setupMasterlessConnection(Configuration conf) {
    conf.set(ClusterConnection.HBASE_CLIENT_CONNECTION_IMPL, MasterlessConnection.class.getName());
  }

  /**
   * Some tests shut down the master. But checking table availability is a master RPC which is
   * performed on region re-lookups, so this connection treats all tables as enabled.
   */
  static class MasterlessConnection extends ConnectionImplementation {
    MasterlessConnection(Configuration conf, ExecutorService pool, User user,
      Map<String, byte[]> requestAttributes) throws IOException {
      super(conf, pool, user, requestAttributes);
    }

    @Override
    public boolean isTableDisabled(TableName tableName) throws IOException {
      // treat all tables as enabled
      return false;
    }
  }

  /**
   * Get a unique key for the rpc stub to the given server.
   */
  static String getStubKey(String serviceName, ServerName serverName) {
    return String.format("%s@%s", serviceName, serverName);
  }
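  // Illustrative example (a sketch; the exact form depends on ServerName's toString, typically
  // "host,port,startcode"): getStubKey("ClientService", serverName) yields something like
  // "ClientService@rs1.example.com,16020,1600000000000".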

  /**
   * Return retries + 1. The returned value will be in range [1, Integer.MAX_VALUE].
   */
  static int retries2Attempts(int retries) {
    return Math.max(1, retries == Integer.MAX_VALUE ? Integer.MAX_VALUE : retries + 1);
  }
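  // For example: retries2Attempts(0) == 1, retries2Attempts(10) == 11, and
  // retries2Attempts(Integer.MAX_VALUE) == Integer.MAX_VALUE, without overflowing to a negative.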

  static void checkHasFamilies(Mutation mutation) {
    Preconditions.checkArgument(mutation.numFamilies() > 0,
      "Invalid arguments to %s, zero columns specified", mutation.toString());
  }

  /** Dummy nonce generator for disabled nonces. */
  static final NonceGenerator NO_NONCE_GENERATOR = new NonceGenerator() {

    @Override
    public long newNonce() {
      return HConstants.NO_NONCE;
    }

    @Override
    public long getNonceGroup() {
      return HConstants.NO_NONCE;
    }
  };

  // A byte array in which all elements are the max byte value; it is used to construct the
  // closest row before a given row.
  static final byte[] MAX_BYTE_ARRAY = Bytes.createMaxByteArray(9);

  /**
   * Create the closest row after the specified row
   */
  static byte[] createClosestRowAfter(byte[] row) {
    return Arrays.copyOf(row, row.length + 1);
  }

  /**
   * Create a row before the specified row and very close to the specified row.
   */
  static byte[] createCloseRowBefore(byte[] row) {
    if (row.length == 0) {
      return MAX_BYTE_ARRAY;
    }
    if (row[row.length - 1] == 0) {
      return Arrays.copyOf(row, row.length - 1);
    } else {
      byte[] nextRow = new byte[row.length + MAX_BYTE_ARRAY.length];
      System.arraycopy(row, 0, nextRow, 0, row.length - 1);
      nextRow[row.length - 1] = (byte) ((row[row.length - 1] & 0xFF) - 1);
      System.arraycopy(MAX_BYTE_ARRAY, 0, nextRow, row.length, MAX_BYTE_ARRAY.length);
      return nextRow;
    }
  }
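  // Illustrative examples, derived from the code above:
  //   createClosestRowAfter({0x01})      -> {0x01, 0x00}   (append a zero byte)
  //   createCloseRowBefore({0x01, 0x00}) -> {0x01}         (drop the trailing zero byte)
  //   createCloseRowBefore({0x01})       -> {0x00, 0xFF x 9} (decrement the last byte, then pad
  //                                         with MAX_BYTE_ARRAY)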

  static boolean isEmptyStartRow(byte[] row) {
    return Bytes.equals(row, EMPTY_START_ROW);
  }

  static boolean isEmptyStopRow(byte[] row) {
    return Bytes.equals(row, EMPTY_END_ROW);
  }

  static void resetController(HBaseRpcController controller, long timeoutNs, int priority,
    TableName tableName) {
    controller.reset();
    if (timeoutNs >= 0) {
      controller.setCallTimeout(
        (int) Math.min(Integer.MAX_VALUE, TimeUnit.NANOSECONDS.toMillis(timeoutNs)));
    }
    controller.setPriority(priority);
    if (tableName != null) {
      controller.setTableName(tableName);
    }
  }

  static Throwable translateException(Throwable t) {
    if (t instanceof UndeclaredThrowableException && t.getCause() != null) {
      t = t.getCause();
    }
    if (t instanceof RemoteException) {
      t = ((RemoteException) t).unwrapRemoteException();
    }
    if (t instanceof ServiceException && t.getCause() != null) {
      t = translateException(t.getCause());
    }
    return t;
  }
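  // For example, a ServiceException whose cause is a RemoteException carrying a
  // DoNotRetryIOException is typically translated (recursively) down to the
  // DoNotRetryIOException, so callers see the real server-side failure rather than the RPC
  // wrapper.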

  static long calcEstimatedSize(Result rs) {
    long estimatedHeapSizeOfResult = 0;
    // We don't create an Iterator here; just walk the raw cell array.
    for (Cell cell : rs.rawCells()) {
      estimatedHeapSizeOfResult += cell.heapSize();
    }
    return estimatedHeapSizeOfResult;
  }

  static Result filterCells(Result result, Cell keepCellsAfter) {
    if (keepCellsAfter == null) {
      // do not need to filter
      return result;
    }
    // not the same row
    if (!PrivateCellUtil.matchingRows(keepCellsAfter, result.getRow(), 0, result.getRow().length)) {
      return result;
    }
    Cell[] rawCells = result.rawCells();
    int index = Arrays.binarySearch(rawCells, keepCellsAfter,
      CellComparator.getInstance()::compareWithoutRow);
    if (index < 0) {
      index = -index - 1;
    } else {
      index++;
    }
    if (index == 0) {
      return result;
    }
    if (index == rawCells.length) {
      return null;
    }
    return Result.create(Arrays.copyOfRange(rawCells, index, rawCells.length), null,
      result.isStale(), result.mayHaveMoreCellsInRow());
  }
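  // Illustrative example, derived from the code above: for a Result with cells {c1, c2, c3} of
  // the same row and keepCellsAfter == c2, only {c3} is kept; if keepCellsAfter sorts at or after
  // c3, null is returned (everything was already seen); if it sorts before c1, the Result is
  // returned unchanged.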

  // Add a delta to avoid timing out immediately after a retry sleep.
  public static final long SLEEP_DELTA_NS = TimeUnit.MILLISECONDS.toNanos(1);

  static Get toCheckExistenceOnly(Get get) {
    if (get.isCheckExistenceOnly()) {
      return get;
    }
    return ReflectionUtils.newInstance(get.getClass(), get).setCheckExistenceOnly(true);
  }

  static List<Get> toCheckExistenceOnly(List<Get> gets) {
    return gets.stream().map(ConnectionUtils::toCheckExistenceOnly).collect(toList());
  }

  static RegionLocateType getLocateType(Scan scan) {
    if (scan.isReversed()) {
      if (isEmptyStartRow(scan.getStartRow())) {
        return RegionLocateType.BEFORE;
      } else {
        return scan.includeStartRow() ? RegionLocateType.CURRENT : RegionLocateType.BEFORE;
      }
    } else {
      return scan.includeStartRow() ? RegionLocateType.CURRENT : RegionLocateType.AFTER;
    }
  }

  static boolean noMoreResultsForScan(Scan scan, RegionInfo info) {
    if (isEmptyStopRow(info.getEndKey())) {
      return true;
    }
    if (isEmptyStopRow(scan.getStopRow())) {
      return false;
    }
    int c = Bytes.compareTo(info.getEndKey(), scan.getStopRow());
    // No more results if:
    // 1. our stop row is less than the endKey of the region, or
    // 2. our stop row is equal to the endKey of the region and we do not include the stop row in
    // the scan.
    return c > 0 || (c == 0 && !scan.includeStopRow());
  }
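  // For example, if the region's endKey is "bbb": a scan with stopRow "aaa", or with stopRow
  // "bbb" and includeStopRow() == false, ends at this region, while a scan with stopRow "ccc"
  // still has more regions to read.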

  static boolean noMoreResultsForReverseScan(Scan scan, RegionInfo info) {
    if (isEmptyStartRow(info.getStartKey())) {
      return true;
    }
    if (isEmptyStopRow(scan.getStopRow())) {
      return false;
    }
    // No need to test whether the stop row is inclusive, as the start key of a region is included
    // in the region.
    return Bytes.compareTo(info.getStartKey(), scan.getStopRow()) <= 0;
  }

  public static ScanResultCache createScanResultCache(Scan scan) {
    if (scan.getAllowPartialResults()) {
      return new AllowPartialScanResultCache();
    } else if (scan.getBatch() > 0) {
      return new BatchScanResultCache(scan.getBatch());
    } else {
      return new CompleteScanResultCache();
    }
  }

  private static final String MY_ADDRESS = getMyAddress();

  private static String getMyAddress() {
    try {
      return DNS.getDefaultHost("default", "default");
    } catch (UnknownHostException uhe) {
      LOG.error("cannot determine my address", uhe);
      return null;
    }
  }

  static boolean isRemote(String host) {
    return !host.equalsIgnoreCase(MY_ADDRESS);
  }

  static void incRPCCallsMetrics(ScanMetrics scanMetrics, boolean isRegionServerRemote) {
    if (scanMetrics == null) {
      return;
    }
    scanMetrics.countOfRPCcalls.incrementAndGet();
    if (isRegionServerRemote) {
      scanMetrics.countOfRemoteRPCcalls.incrementAndGet();
    }
  }

  static void incRPCRetriesMetrics(ScanMetrics scanMetrics, boolean isRegionServerRemote) {
    if (scanMetrics == null) {
      return;
    }
    scanMetrics.countOfRPCRetries.incrementAndGet();
    if (isRegionServerRemote) {
      scanMetrics.countOfRemoteRPCRetries.incrementAndGet();
    }
  }

  static void updateResultsMetrics(ScanMetrics scanMetrics, Result[] rrs,
    boolean isRegionServerRemote) {
    if (scanMetrics == null || rrs == null || rrs.length == 0) {
      return;
    }
    long resultSize = 0;
    for (Result rr : rrs) {
      for (Cell cell : rr.rawCells()) {
        resultSize += PrivateCellUtil.estimatedSerializedSizeOf(cell);
      }
    }
    scanMetrics.countOfBytesInResults.addAndGet(resultSize);
    if (isRegionServerRemote) {
      scanMetrics.countOfBytesInRemoteResults.addAndGet(resultSize);
    }
  }

  /**
   * Use the scan metrics returned by the server to add to the identically named counters in the
   * client side metrics. If a counter does not exist with the same name as the server side metric,
   * the attempt to increase the counter will fail.
   */
  static void updateServerSideMetrics(ScanMetrics scanMetrics, ScanResponse response) {
    if (scanMetrics == null || response == null || !response.hasScanMetrics()) {
      return;
    }
    ResponseConverter.getScanMetrics(response).forEach(scanMetrics::addToCounter);
  }

  static void incRegionCountMetrics(ScanMetrics scanMetrics) {
    if (scanMetrics == null) {
      return;
    }
    scanMetrics.countOfRegions.incrementAndGet();
  }

  /**
   * Connect the two futures: if the src future is done, then mark the dst future as done, and if
   * the dst future is done, then cancel the src future. This is used for timeline consistent
   * reads.
   * <p/>
   * Pass empty metrics if you want to link the primary future and the dst future, so we will not
   * increase the hedged read related metrics.
   */
  private static <T> void connect(CompletableFuture<T> srcFuture, CompletableFuture<T> dstFuture,
    Optional<MetricsConnection> metrics) {
    addListener(srcFuture, (r, e) -> {
      if (e != null) {
        dstFuture.completeExceptionally(e);
      } else {
        if (dstFuture.complete(r)) {
          metrics.ifPresent(MetricsConnection::incrHedgedReadWin);
        }
      }
    });
    // The cancellation may be a dummy one, as the dstFuture may already have been completed by
    // this srcFuture. Notice that this is a bit tricky, as the execution chain may be 'complete
    // src -> complete dst -> cancel src'; for now it seems to be fine, as CompletableFuture uses
    // CAS to set the result first. If this causes problems later, we could use whenCompleteAsync
    // to break the tie.
    addListener(dstFuture, (r, e) -> srcFuture.cancel(false));
  }

  private static <T> void sendRequestsToSecondaryReplicas(
    Function<Integer, CompletableFuture<T>> requestReplica, RegionLocations locs,
    CompletableFuture<T> future, Optional<MetricsConnection> metrics) {
    if (future.isDone()) {
      // Do not send requests to secondary replicas if the future is done, i.e., the primary
      // request has already finished.
      return;
    }
    for (int replicaId = 1, n = locs.size(); replicaId < n; replicaId++) {
      CompletableFuture<T> secondaryFuture = requestReplica.apply(replicaId);
      metrics.ifPresent(MetricsConnection::incrHedgedReadOps);
      connect(secondaryFuture, future, metrics);
    }
  }

  static <T> CompletableFuture<T> timelineConsistentRead(AsyncRegionLocator locator,
    TableName tableName, Query query, byte[] row, RegionLocateType locateType,
    Function<Integer, CompletableFuture<T>> requestReplica, long rpcTimeoutNs,
    long primaryCallTimeoutNs, Timer retryTimer, Optional<MetricsConnection> metrics) {
    if (query.getConsistency() != Consistency.TIMELINE) {
      return requestReplica.apply(RegionReplicaUtil.DEFAULT_REPLICA_ID);
    }
    // The user specified a replica id explicitly, so just send the request to that replica.
    if (query.getReplicaId() >= 0) {
      return requestReplica.apply(query.getReplicaId());
    }
    // Timeline consistent read, where we may send requests to other region replicas
    CompletableFuture<T> primaryFuture = requestReplica.apply(RegionReplicaUtil.DEFAULT_REPLICA_ID);
    CompletableFuture<T> future = new CompletableFuture<>();
    connect(primaryFuture, future, Optional.empty());
    long startNs = System.nanoTime();
    // After the getRegionLocations call, all the locations for the replicas of this region should
    // have been cached, so it is not a big deal to locate them again when actually sending
    // requests to these replicas.
    addListener(locator.getRegionLocations(tableName, row, locateType, false, rpcTimeoutNs),
      (locs, error) -> {
        if (error != null) {
          LOG.warn(
            "Failed to locate all the replicas for table={}, row='{}', locateType={}"
              + " give up timeline consistent read",
            tableName, Bytes.toStringBinary(row), locateType, error);
          return;
        }
        if (locs.size() <= 1) {
          LOG.warn(
            "There are no secondary replicas for region {}, give up timeline consistent read",
            locs.getDefaultRegionLocation().getRegion());
          return;
        }
        long delayNs = primaryCallTimeoutNs - (System.nanoTime() - startNs);
        if (delayNs <= 0) {
          sendRequestsToSecondaryReplicas(requestReplica, locs, future, metrics);
        } else {
          retryTimer.newTimeout(
            timeout -> sendRequestsToSecondaryReplicas(requestReplica, locs, future, metrics),
            delayNs, TimeUnit.NANOSECONDS);
        }
      });
    return future;
  }
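  // How this is typically used (a sketch, not a contract): for a TIMELINE-consistency read, the
  // caller passes a requestReplica function that issues the RPC against the given replica id. The
  // primary (replica 0) is tried first; if it has not completed within primaryCallTimeoutNs, the
  // same request is hedged to the secondary replicas and the first response to arrive completes
  // the returned future.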

  // validate for well-formedness
  static void validatePut(Put put, int maxKeyValueSize) {
    if (put.isEmpty()) {
      throw new IllegalArgumentException("No columns to insert");
    }
    if (maxKeyValueSize > 0) {
      for (List<Cell> list : put.getFamilyCellMap().values()) {
        for (Cell cell : list) {
          if (cell.getSerializedSize() > maxKeyValueSize) {
            throw new IllegalArgumentException("KeyValue size too large");
          }
        }
      }
    }
  }
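  // For example, an empty Put (no column added) is rejected with "No columns to insert", and,
  // when maxKeyValueSize > 0, any single cell whose serialized size exceeds that limit is
  // rejected with "KeyValue size too large".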

  static void validatePutsInRowMutations(RowMutations rowMutations, int maxKeyValueSize) {
    for (Mutation mutation : rowMutations.getMutations()) {
      if (mutation instanceof Put) {
        validatePut((Put) mutation, maxKeyValueSize);
      }
    }
  }

  /**
   * Select the priority for the rpc call.
   * <p/>
   * The rules are:
   * <ol>
   * <li>If the user set a priority explicitly, just use it.</li>
   * <li>For system tables, use {@link HConstants#SYSTEMTABLE_QOS}.</li>
   * <li>For other tables, use {@link HConstants#NORMAL_QOS}.</li>
   * </ol>
   * @param priority  the priority set by the user, can be {@link HConstants#PRIORITY_UNSET}.
   * @param tableName the table we operate on
   */
  static int calcPriority(int priority, TableName tableName) {
    if (priority != HConstants.PRIORITY_UNSET) {
      return priority;
    } else {
      return getPriority(tableName);
    }
  }

  static int getPriority(TableName tableName) {
    if (tableName.isSystemTable()) {
      return HConstants.SYSTEMTABLE_QOS;
    } else {
      return HConstants.NORMAL_QOS;
    }
  }
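  // For example: calcPriority(HConstants.PRIORITY_UNSET, TableName.META_TABLE_NAME) resolves to
  // HConstants.SYSTEMTABLE_QOS, while calcPriority(7, someUserTable) simply returns 7 (here
  // someUserTable stands for any non-system table).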

  static <T> CompletableFuture<T> getOrFetch(AtomicReference<T> cacheRef,
    AtomicReference<CompletableFuture<T>> futureRef, boolean reload,
    Supplier<CompletableFuture<T>> fetch, Predicate<T> validator, String type) {
    for (;;) {
      if (!reload) {
        T value = cacheRef.get();
        if (value != null && validator.test(value)) {
          return CompletableFuture.completedFuture(value);
        }
      }
      LOG.trace("{} cache is null, try fetching from registry", type);
      if (futureRef.compareAndSet(null, new CompletableFuture<>())) {
        LOG.debug("Start fetching {} from registry", type);
        CompletableFuture<T> future = futureRef.get();
        addListener(fetch.get(), (value, error) -> {
          if (error != null) {
            LOG.debug("Failed to fetch {} from registry", type, error);
            futureRef.getAndSet(null).completeExceptionally(error);
            return;
          }
          LOG.debug("The fetched {} is {}", type, value);
          // Here we update the cache before resetting the future, so it is possible that someone
          // can get a stale value. Consider this:
          // 1. we update cacheRef
          // 2. someone clears the cache and relocates again
          // 3. the futureRef is not null so the old future is used
          // 4. we clear futureRef and complete the future in it with the value that was cleared
          // in step 2
          // But we do not think it is a big deal, as it rarely happens, and even if it happens the
          // caller will just retry again later; there are no correctness problems.
          cacheRef.set(value);
          futureRef.set(null);
          future.complete(value);
        });
        return future;
      } else {
        CompletableFuture<T> future = futureRef.get();
        if (future != null) {
          return future;
        }
      }
    }
  }
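  // Hypothetical usage sketch (the names below are illustrative, not an existing call site):
  // cache a value fetched from the connection registry and de-duplicate concurrent fetches, e.g.
  //   getOrFetch(clusterIdCache, clusterIdFetchFuture, reload,
  //     () -> registry.getClusterId(), id -> true, "cluster id");
  // Racing callers share the same in-flight future instead of each hitting the registry.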

  static void updateStats(Optional<ServerStatisticTracker> optStats,
    Optional<MetricsConnection> optMetrics, ServerName serverName, MultiResponse resp) {
    if (!optStats.isPresent() && !optMetrics.isPresent()) {
      // Neither ServerStatisticTracker nor MetricsConnection is present, just return
      return;
    }
    resp.getResults().forEach((regionName, regionResult) -> {
      ClientProtos.RegionLoadStats stat = regionResult.getStat();
      if (stat == null) {
        if (LOG.isDebugEnabled()) {
          LOG.debug("No ClientProtos.RegionLoadStats found for server={}, region={}", serverName,
            Bytes.toStringBinary(regionName));
        }
        return;
      }
      RegionLoadStats regionLoadStats = ProtobufUtil.createRegionLoadStats(stat);
      optStats.ifPresent(
        stats -> ResultStatsUtil.updateStats(stats, serverName, regionName, regionLoadStats));
      optMetrics.ifPresent(
        metrics -> ResultStatsUtil.updateStats(metrics, serverName, regionName, regionLoadStats));
    });
  }

  public static boolean isUnexpectedPreambleHeaderException(IOException e) {
    if (!(e instanceof RemoteException)) {
      return false;
    }
    RemoteException re = (RemoteException) e;
    return FatalConnectionException.class.getName().equals(re.getClassName())
      && re.getMessage().startsWith("Expected HEADER=");
  }
}