/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.client;

import com.google.protobuf.Descriptors;
import com.google.protobuf.Message;
import com.google.protobuf.RpcController;
import edu.umd.cs.findbugs.annotations.Nullable;
import java.io.Closeable;
import java.io.IOException;
import java.io.InterruptedIOException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.EnumSet;
import java.util.HashMap;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.Callable;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.Future;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.concurrent.atomic.AtomicReference;
import java.util.function.Supplier;
import java.util.regex.Pattern;
import java.util.stream.Collectors;
import java.util.stream.Stream;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.CacheEvictionStats;
import org.apache.hadoop.hbase.CacheEvictionStatsBuilder;
import org.apache.hadoop.hbase.ClusterMetrics;
import org.apache.hadoop.hbase.ClusterMetrics.Option;
import org.apache.hadoop.hbase.ClusterMetricsBuilder;
import org.apache.hadoop.hbase.DoNotRetryIOException;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.HRegionLocation;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.MasterNotRunningException;
import org.apache.hadoop.hbase.MetaTableAccessor;
import org.apache.hadoop.hbase.NamespaceDescriptor;
import org.apache.hadoop.hbase.NamespaceNotFoundException;
import org.apache.hadoop.hbase.NotServingRegionException;
import org.apache.hadoop.hbase.RegionLocations;
import org.apache.hadoop.hbase.RegionMetrics;
import org.apache.hadoop.hbase.RegionMetricsBuilder;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.TableExistsException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.TableNotDisabledException;
import org.apache.hadoop.hbase.TableNotEnabledException;
import org.apache.hadoop.hbase.TableNotFoundException;
import org.apache.hadoop.hbase.UnknownRegionException;
import org.apache.hadoop.hbase.ZooKeeperConnectionException;
import org.apache.hadoop.hbase.client.replication.ReplicationPeerConfigUtil;
import org.apache.hadoop.hbase.client.replication.TableCFs;
import org.apache.hadoop.hbase.client.security.SecurityCapability;
import org.apache.hadoop.hbase.exceptions.TimeoutIOException;
import org.apache.hadoop.hbase.ipc.CoprocessorRpcChannel;
import org.apache.hadoop.hbase.ipc.CoprocessorRpcUtils;
import org.apache.hadoop.hbase.ipc.HBaseRpcController;
import org.apache.hadoop.hbase.ipc.RpcControllerFactory;
import org.apache.hadoop.hbase.quotas.QuotaFilter;
import org.apache.hadoop.hbase.quotas.QuotaRetriever;
import org.apache.hadoop.hbase.quotas.QuotaSettings;
import org.apache.hadoop.hbase.quotas.SpaceQuotaSnapshot;
import org.apache.hadoop.hbase.regionserver.wal.FailedLogCloseException;
import org.apache.hadoop.hbase.replication.ReplicationPeerConfig;
import org.apache.hadoop.hbase.replication.ReplicationPeerDescription;
import org.apache.hadoop.hbase.security.access.GetUserPermissionsRequest;
import org.apache.hadoop.hbase.security.access.Permission;
import org.apache.hadoop.hbase.security.access.ShadedAccessControlUtil;
import org.apache.hadoop.hbase.security.access.UserPermission;
import org.apache.hadoop.hbase.snapshot.ClientSnapshotDescriptionUtils;
import org.apache.hadoop.hbase.snapshot.HBaseSnapshotException;
import org.apache.hadoop.hbase.snapshot.RestoreSnapshotException;
import org.apache.hadoop.hbase.snapshot.SnapshotCreationException;
import org.apache.hadoop.hbase.snapshot.UnknownSnapshotException;
import org.apache.hadoop.hbase.util.Addressing;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
import org.apache.hadoop.hbase.util.ForeignExceptionUtil;
import org.apache.hadoop.hbase.util.Pair;
import org.apache.hadoop.hbase.util.Strings;
import org.apache.hadoop.ipc.RemoteException;
import org.apache.hadoop.util.StringUtils;
import org.apache.yetus.audience.InterfaceAudience;
import org.apache.yetus.audience.InterfaceStability;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import org.apache.hbase.thirdparty.com.google.common.base.Preconditions;
import org.apache.hbase.thirdparty.com.google.protobuf.ServiceException;
import org.apache.hbase.thirdparty.org.apache.commons.collections4.CollectionUtils;

import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
import org.apache.hadoop.hbase.shaded.protobuf.RequestConverter;
import org.apache.hadoop.hbase.shaded.protobuf.generated.AccessControlProtos;
import org.apache.hadoop.hbase.shaded.protobuf.generated.AccessControlProtos.GrantRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.AccessControlProtos.HasUserPermissionsRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.AccessControlProtos.RevokeRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos;
import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.AdminService;
import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ClearCompactionQueuesRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ClearRegionBlockCacheRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ClearRegionBlockCacheResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CompactRegionRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CompactionSwitchRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CompactionSwitchResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.FlushRegionRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionInfoRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionInfoResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.RollWALWriterRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.RollWALWriterResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.StopServerRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.UpdateConfigurationRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos;
import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.CoprocessorServiceRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.CoprocessorServiceResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos;
import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ProcedureDescription;
import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier.RegionSpecifierType;
import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.TableSchema;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AbortProcedureRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AbortProcedureResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AddColumnRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AddColumnResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AssignRegionRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ClearDeadServersRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.CreateNamespaceRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.CreateNamespaceResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.CreateTableRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.CreateTableResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteColumnRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteColumnResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteNamespaceRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteNamespaceResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteSnapshotRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteTableRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteTableResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DisableTableRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DisableTableResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.EnableTableRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.EnableTableResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ExecProcedureRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ExecProcedureResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.FlushMasterStoreRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.FlushTableRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.FlushTableResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetClusterStatusRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetCompletedSnapshotsRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetLocksRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetLocksResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetNamespaceDescriptorRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetProcedureResultRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetProcedureResultResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetProceduresRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetProceduresResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetSchemaAlterStatusRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetSchemaAlterStatusResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetTableDescriptorsRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetTableDescriptorsResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetTableNamesRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsInMaintenanceModeRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsInMaintenanceModeResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsProcedureDoneRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsProcedureDoneResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsRpcThrottleEnabledRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsSnapshotCleanupEnabledRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsSnapshotDoneRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsSnapshotDoneResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListDecommissionedRegionServersRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListNamespaceDescriptorsRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListNamespacesRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListTableDescriptorsByNamespaceRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListTableDescriptorsByStateRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListTableDescriptorsByStateResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListTableNamesByNamespaceRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListTableNamesByStateRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListTableNamesByStateResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MajorCompactionTimestampForRegionRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MajorCompactionTimestampRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MergeTableRegionsRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MergeTableRegionsResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ModifyColumnRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ModifyColumnResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ModifyColumnStoreFileTrackerRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ModifyColumnStoreFileTrackerResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ModifyNamespaceRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ModifyNamespaceResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ModifyTableRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ModifyTableResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ModifyTableStoreFileTrackerRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ModifyTableStoreFileTrackerResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MoveRegionRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.RestoreSnapshotRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.RestoreSnapshotResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SecurityCapabilitiesRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SetBalancerRunningRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SetNormalizerRunningRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SetSnapshotCleanupRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ShutdownRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SnapshotRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SnapshotResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SplitTableRegionRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SplitTableRegionResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.StopMasterRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.TruncateTableRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.TruncateTableResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.UnassignRegionRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.ProcedureProtos;
import org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.GetQuotaStatesResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.GetSpaceQuotaRegionSizesResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.GetSpaceQuotaRegionSizesResponse.RegionSizes;
import org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.GetSpaceQuotaSnapshotsResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.GetSpaceQuotaSnapshotsResponse.TableQuotaSnapshot;
import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos;
import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.AddReplicationPeerResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.DisableReplicationPeerResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.EnableReplicationPeerResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.GetReplicationPeerConfigResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.GetReplicationPeerModificationProceduresRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.GetReplicationPeerStateRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.GetReplicationPeerStateResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.IsReplicationPeerModificationEnabledRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.RemoveReplicationPeerResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.ReplicationPeerModificationSwitchRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.UpdateReplicationPeerConfigResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos;

/**
 * HBaseAdmin is no longer a client API. It is marked InterfaceAudience.Private indicating that
 * this is an HBase-internal class as defined in
 * https://hadoop.apache.org/docs/current/hadoop-project-dist/hadoop-common/InterfaceClassification.html
 * There are no guarantees for backwards source / binary compatibility and methods or class can
 * change or go away without deprecation. Use {@link Connection#getAdmin()} to obtain an instance
 * of {@link Admin} instead of constructing an HBaseAdmin directly.
 * <p>
 * Connection should be an <i>unmanaged</i> connection obtained via
 * {@link ConnectionFactory#createConnection(Configuration)}
 * @see ConnectionFactory
 * @see Connection
 * @see Admin
 */
@InterfaceAudience.Private
public class HBaseAdmin implements Admin {
  private static final Logger LOG = LoggerFactory.getLogger(HBaseAdmin.class);

  private ClusterConnection connection;

  private final Configuration conf;
  private final long pause;
  private final int numRetries;
  private final int syncWaitTimeout;
  private boolean aborted;
  private int operationTimeout;
  private int rpcTimeout;
  private int getProcedureTimeout;

  private RpcRetryingCallerFactory rpcCallerFactory;
  private RpcControllerFactory rpcControllerFactory;

  private NonceGenerator ng;

  @Override
  public int getOperationTimeout() {
    return operationTimeout;
  }

  @Override
  public int getSyncWaitTimeout() {
    return syncWaitTimeout;
  }

  HBaseAdmin(ClusterConnection connection) throws IOException {
    this.conf = connection.getConfiguration();
    this.connection = connection;

    // TODO: receive ConnectionConfiguration here rather than re-parsing these configs every time.
    this.pause =
      this.conf.getLong(HConstants.HBASE_CLIENT_PAUSE, HConstants.DEFAULT_HBASE_CLIENT_PAUSE);
    this.numRetries = this.conf.getInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER,
      HConstants.DEFAULT_HBASE_CLIENT_RETRIES_NUMBER);
    this.operationTimeout = this.conf.getInt(HConstants.HBASE_CLIENT_OPERATION_TIMEOUT,
      HConstants.DEFAULT_HBASE_CLIENT_OPERATION_TIMEOUT);
    this.rpcTimeout =
      this.conf.getInt(HConstants.HBASE_RPC_TIMEOUT_KEY, HConstants.DEFAULT_HBASE_RPC_TIMEOUT);
    this.syncWaitTimeout =
      this.conf.getInt("hbase.client.sync.wait.timeout.msec", 10 * 60000); // 10min
    this.getProcedureTimeout =
      this.conf.getInt("hbase.client.procedure.future.get.timeout.msec", 10 * 60000); // 10min

    this.rpcCallerFactory = connection.getRpcRetryingCallerFactory();
    this.rpcControllerFactory = connection.getRpcControllerFactory();

    this.ng = this.connection.getNonceGenerator();
  }
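
  /*
   * Illustrative usage sketch (not part of the original file): callers are expected to obtain an
   * Admin through an unmanaged Connection rather than constructing HBaseAdmin directly, as
   * described in the class comment above. The table name below is a placeholder.
   *
   *   Configuration conf = HBaseConfiguration.create();
   *   try (Connection connection = ConnectionFactory.createConnection(conf);
   *       Admin admin = connection.getAdmin()) {
   *     if (admin.tableExists(TableName.valueOf("example_table"))) {
   *       admin.listTableDescriptors().forEach(td -> LOG.info("table: {}", td.getTableName()));
   *     }
   *   }
   */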

  @Override
  public void abort(String why, Throwable e) {
    // Currently does nothing but throw the passed message and exception
    this.aborted = true;
    throw new RuntimeException(why, e);
  }

  @Override
  public boolean isAborted() {
    return this.aborted;
  }

  @Override
  public boolean abortProcedure(final long procId, final boolean mayInterruptIfRunning)
    throws IOException {
    return get(abortProcedureAsync(procId, mayInterruptIfRunning), this.syncWaitTimeout,
      TimeUnit.MILLISECONDS);
  }

  @Override
  public Future<Boolean> abortProcedureAsync(final long procId, final boolean mayInterruptIfRunning)
    throws IOException {
    Boolean abortProcResponse = executeCallable(
      new MasterCallable<AbortProcedureResponse>(getConnection(), getRpcControllerFactory()) {
        @Override
        protected AbortProcedureResponse rpcCall() throws Exception {
          AbortProcedureRequest abortProcRequest =
            AbortProcedureRequest.newBuilder().setProcId(procId).build();
          return master.abortProcedure(getRpcController(), abortProcRequest);
        }
      }).getIsProcedureAborted();
    return new AbortProcedureFuture(this, procId, abortProcResponse);
  }

  @Override
  public List<TableDescriptor> listTableDescriptors() throws IOException {
    return listTableDescriptors((Pattern) null, false);
  }

  @Override
  public List<TableDescriptor> listTableDescriptorsByState(boolean isEnabled) throws IOException {
    return executeCallable(
      new MasterCallable<List<TableDescriptor>>(getConnection(), getRpcControllerFactory()) {
        @Override
        protected List<TableDescriptor> rpcCall() throws Exception {
          ListTableDescriptorsByStateResponse response =
            master.listTableDescriptorsByState(getRpcController(),
              ListTableDescriptorsByStateRequest.newBuilder().setIsEnabled(isEnabled).build());
          return ProtobufUtil.toTableDescriptorList(response);
        }
      });
  }

  @Override
  public List<TableDescriptor> listTableDescriptors(Pattern pattern, boolean includeSysTables)
    throws IOException {
    return executeCallable(
      new MasterCallable<List<TableDescriptor>>(getConnection(), getRpcControllerFactory()) {
        @Override
        protected List<TableDescriptor> rpcCall() throws Exception {
          GetTableDescriptorsRequest req =
            RequestConverter.buildGetTableDescriptorsRequest(pattern, includeSysTables);
          return ProtobufUtil
            .toTableDescriptorList(master.getTableDescriptors(getRpcController(), req));
        }
      });
  }

  @Override
  public TableDescriptor getDescriptor(TableName tableName)
    throws TableNotFoundException, IOException {
    return getTableDescriptor(tableName, getConnection(), rpcCallerFactory, rpcControllerFactory,
      operationTimeout, rpcTimeout);
  }

  @Override
  public Future<Void> modifyTableAsync(TableDescriptor td, boolean reopenRegions)
    throws IOException {
    ModifyTableResponse response = executeCallable(
      new MasterCallable<ModifyTableResponse>(getConnection(), getRpcControllerFactory()) {
        long nonceGroup = ng.getNonceGroup();
        long nonce = ng.newNonce();

        @Override
        protected ModifyTableResponse rpcCall() throws Exception {
          setPriority(td.getTableName());
          ModifyTableRequest request = RequestConverter.buildModifyTableRequest(td.getTableName(),
            td, nonceGroup, nonce, reopenRegions);
          return master.modifyTable(getRpcController(), request);
        }
      });
    return new ModifyTableFuture(this, td.getTableName(), response);
  }

  @Override
  public Future<Void> modifyTableStoreFileTrackerAsync(TableName tableName, String dstSFT)
    throws IOException {
    ModifyTableStoreFileTrackerResponse response =
      executeCallable(new MasterCallable<ModifyTableStoreFileTrackerResponse>(getConnection(),
        getRpcControllerFactory()) {
        long nonceGroup = ng.getNonceGroup();
        long nonce = ng.newNonce();

        @Override
        protected ModifyTableStoreFileTrackerResponse rpcCall() throws Exception {
          setPriority(tableName);
          ModifyTableStoreFileTrackerRequest request = RequestConverter
            .buildModifyTableStoreFileTrackerRequest(tableName, dstSFT, nonceGroup, nonce);
          return master.modifyTableStoreFileTracker(getRpcController(), request);
        }
      });
    return new ModifyTablerStoreFileTrackerFuture(this, tableName, response);
  }

  private static class ModifyTablerStoreFileTrackerFuture extends ModifyTableFuture {
    public ModifyTablerStoreFileTrackerFuture(HBaseAdmin admin, TableName tableName,
      ModifyTableStoreFileTrackerResponse response) {
      super(admin, tableName,
        (response != null && response.hasProcId()) ? response.getProcId() : null);
    }

    @Override
    public String getOperationType() {
      return "MODIFY_TABLE_STORE_FILE_TRACKER";
    }
  }

  @Override
  public List<TableDescriptor> listTableDescriptorsByNamespace(byte[] name) throws IOException {
    return executeCallable(
      new MasterCallable<List<TableDescriptor>>(getConnection(), getRpcControllerFactory()) {
        @Override
        protected List<TableDescriptor> rpcCall() throws Exception {
          return master
            .listTableDescriptorsByNamespace(getRpcController(),
              ListTableDescriptorsByNamespaceRequest.newBuilder()
                .setNamespaceName(Bytes.toString(name)).build())
            .getTableSchemaList().stream().map(ProtobufUtil::toTableDescriptor)
            .collect(Collectors.toList());
        }
      });
  }

  @Override
  public List<TableDescriptor> listTableDescriptors(List<TableName> tableNames) throws IOException {
    return executeCallable(
      new MasterCallable<List<TableDescriptor>>(getConnection(), getRpcControllerFactory()) {
        @Override
        protected List<TableDescriptor> rpcCall() throws Exception {
          GetTableDescriptorsRequest req =
            RequestConverter.buildGetTableDescriptorsRequest(tableNames);
          return ProtobufUtil
            .toTableDescriptorList(master.getTableDescriptors(getRpcController(), req));
        }
      });
  }

  @Override
  public List<RegionInfo> getRegions(final ServerName sn) throws IOException {
    AdminService.BlockingInterface admin = this.connection.getAdmin(sn);
    // TODO: There is no timeout on this controller. Set one!
    HBaseRpcController controller = rpcControllerFactory.newController();
    return ProtobufUtil.getOnlineRegions(controller, admin);
  }

  @Override
  public List<RegionInfo> getRegions(TableName tableName) throws IOException {
    if (TableName.isMetaTableName(tableName)) {
      return Arrays.asList(RegionInfoBuilder.FIRST_META_REGIONINFO);
    } else {
      return MetaTableAccessor.getTableRegions(connection, tableName, true);
    }
  }

  private static class AbortProcedureFuture extends ProcedureFuture<Boolean> {
    private boolean isAbortInProgress;

    public AbortProcedureFuture(final HBaseAdmin admin, final Long procId,
      final Boolean abortProcResponse) {
      super(admin, procId);
      this.isAbortInProgress = abortProcResponse;
    }

    @Override
    public Boolean get(long timeout, TimeUnit unit)
      throws InterruptedException, ExecutionException, TimeoutException {
      if (!this.isAbortInProgress) {
        return false;
      }
      super.get(timeout, unit);
      return true;
    }
  }

  /** Returns Connection used by this object. */
  @Override
  public Connection getConnection() {
    return connection;
  }

  @Override
  public boolean tableExists(final TableName tableName) throws IOException {
    return executeCallable(new RpcRetryingCallable<Boolean>() {
      @Override
      protected Boolean rpcCall(int callTimeout) throws Exception {
        return MetaTableAccessor.getTableState(getConnection(), tableName) != null;
      }
    });
  }

  @Override
  public HTableDescriptor[] listTables() throws IOException {
    return listTables((Pattern) null, false);
  }

  @Override
  public HTableDescriptor[] listTables(Pattern pattern) throws IOException {
    return listTables(pattern, false);
  }

  @Override
  public HTableDescriptor[] listTables(String regex) throws IOException {
    return listTables(Pattern.compile(regex), false);
  }

  @Override
  public HTableDescriptor[] listTables(final Pattern pattern, final boolean includeSysTables)
    throws IOException {
    return executeCallable(
      new MasterCallable<HTableDescriptor[]>(getConnection(), getRpcControllerFactory()) {
        @Override
        protected HTableDescriptor[] rpcCall() throws Exception {
          GetTableDescriptorsRequest req =
            RequestConverter.buildGetTableDescriptorsRequest(pattern, includeSysTables);
          return ProtobufUtil
            .toTableDescriptorList(master.getTableDescriptors(getRpcController(), req)).stream()
            .map(ImmutableHTableDescriptor::new).toArray(HTableDescriptor[]::new);
        }
      });
  }

  @Override
  public HTableDescriptor[] listTables(String regex, boolean includeSysTables) throws IOException {
    return listTables(Pattern.compile(regex), includeSysTables);
  }

  @Override
  public TableName[] listTableNames() throws IOException {
    return listTableNames((Pattern) null, false);
  }

  @Override
  public TableName[] listTableNames(String regex) throws IOException {
    return listTableNames(Pattern.compile(regex), false);
  }

  @Override
  public TableName[] listTableNames(final Pattern pattern, final boolean includeSysTables)
    throws IOException {
    return executeCallable(
      new MasterCallable<TableName[]>(getConnection(), getRpcControllerFactory()) {
        @Override
        protected TableName[] rpcCall() throws Exception {
          GetTableNamesRequest req =
            RequestConverter.buildGetTableNamesRequest(pattern, includeSysTables);
          return ProtobufUtil
            .getTableNameArray(master.getTableNames(getRpcController(), req).getTableNamesList());
        }
      });
  }

  @Override
  public TableName[] listTableNames(final String regex, final boolean includeSysTables)
    throws IOException {
    return listTableNames(Pattern.compile(regex), includeSysTables);
  }

  @Override
  public List<TableName> listTableNamesByState(boolean isEnabled) throws IOException {
    return executeCallable(
      new MasterCallable<List<TableName>>(getConnection(), getRpcControllerFactory()) {
        @Override
        protected List<TableName> rpcCall() throws Exception {
          ListTableNamesByStateResponse response = master.listTableNamesByState(getRpcController(),
            ListTableNamesByStateRequest.newBuilder().setIsEnabled(isEnabled).build());
          return ProtobufUtil.toTableNameList(response.getTableNamesList());
        }
      });
  }

  @Override
  public HTableDescriptor getTableDescriptor(final TableName tableName) throws IOException {
    return getHTableDescriptor(tableName, getConnection(), rpcCallerFactory, rpcControllerFactory,
      operationTimeout, rpcTimeout);
  }

  static TableDescriptor getTableDescriptor(final TableName tableName, Connection connection,
    RpcRetryingCallerFactory rpcCallerFactory, final RpcControllerFactory rpcControllerFactory,
    int operationTimeout, int rpcTimeout) throws IOException {
    if (tableName == null) return null;
    TableDescriptor td =
      executeCallable(new MasterCallable<TableDescriptor>(connection, rpcControllerFactory) {
        @Override
        protected TableDescriptor rpcCall() throws Exception {
          GetTableDescriptorsRequest req =
            RequestConverter.buildGetTableDescriptorsRequest(tableName);
          GetTableDescriptorsResponse htds = master.getTableDescriptors(getRpcController(), req);
          if (!htds.getTableSchemaList().isEmpty()) {
            return ProtobufUtil.toTableDescriptor(htds.getTableSchemaList().get(0));
          }
          return null;
        }
      }, rpcCallerFactory, operationTimeout, rpcTimeout);
    if (td != null) {
      return td;
    }
    throw new TableNotFoundException(tableName.getNameAsString());
  }

  /**
   * @deprecated since 2.0 version and will be removed in 3.0 version. use
   *             {@link #getTableDescriptor(TableName, Connection, RpcRetryingCallerFactory,RpcControllerFactory,int,int)}
   */
  @Deprecated
  static HTableDescriptor getHTableDescriptor(final TableName tableName, Connection connection,
    RpcRetryingCallerFactory rpcCallerFactory, final RpcControllerFactory rpcControllerFactory,
    int operationTimeout, int rpcTimeout) throws IOException {
    if (tableName == null) {
      return null;
    }
    HTableDescriptor htd =
      executeCallable(new MasterCallable<HTableDescriptor>(connection, rpcControllerFactory) {
        @Override
        protected HTableDescriptor rpcCall() throws Exception {
          GetTableDescriptorsRequest req =
            RequestConverter.buildGetTableDescriptorsRequest(tableName);
          GetTableDescriptorsResponse htds = master.getTableDescriptors(getRpcController(), req);
          if (!htds.getTableSchemaList().isEmpty()) {
            return new ImmutableHTableDescriptor(
              ProtobufUtil.toTableDescriptor(htds.getTableSchemaList().get(0)));
          }
          return null;
        }
      }, rpcCallerFactory, operationTimeout, rpcTimeout);
    if (htd != null) {
      return new ImmutableHTableDescriptor(htd);
    }
    throw new TableNotFoundException(tableName.getNameAsString());
  }

  private long getPauseTime(int tries) {
    int triesCount = tries;
    if (triesCount >= HConstants.RETRY_BACKOFF.length) {
      triesCount = HConstants.RETRY_BACKOFF.length - 1;
    }
    return this.pause * HConstants.RETRY_BACKOFF[triesCount];
  }

  @Override
  public void createTable(TableDescriptor desc, byte[] startKey, byte[] endKey, int numRegions)
    throws IOException {
    if (numRegions < 3) {
      throw new IllegalArgumentException("Must create at least three regions");
    } else if (Bytes.compareTo(startKey, endKey) >= 0) {
      throw new IllegalArgumentException("Start key must be smaller than end key");
    }
    if (numRegions == 3) {
      createTable(desc, new byte[][] { startKey, endKey });
      return;
    }
    byte[][] splitKeys = Bytes.split(startKey, endKey, numRegions - 3);
    if (splitKeys == null || splitKeys.length != numRegions - 1) {
      throw new IllegalArgumentException("Unable to split key range into enough regions");
    }
    createTable(desc, splitKeys);
  }
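
  /*
   * Worked example (illustrative, based on the checks above): for numRegions = 5, Bytes.split is
   * asked for numRegions - 3 = 2 intermediate keys and returns them together with startKey and
   * endKey, i.e. numRegions - 1 = 4 region boundaries in total, which createTable(desc, splitKeys)
   * then turns into exactly 5 regions.
   */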

  @Override
  public Future<Void> createTableAsync(final TableDescriptor desc, final byte[][] splitKeys)
    throws IOException {
    if (desc.getTableName() == null) {
      throw new IllegalArgumentException("TableName cannot be null");
    }
    if (splitKeys != null && splitKeys.length > 0) {
      Arrays.sort(splitKeys, Bytes.BYTES_COMPARATOR);
      // Verify there are no duplicate split keys
      byte[] lastKey = null;
      for (byte[] splitKey : splitKeys) {
        if (Bytes.compareTo(splitKey, HConstants.EMPTY_BYTE_ARRAY) == 0) {
          throw new IllegalArgumentException(
            "Empty split key must not be passed in the split keys.");
        }
        if (lastKey != null && Bytes.equals(splitKey, lastKey)) {
          throw new IllegalArgumentException("All split keys must be unique, " + "found duplicate: "
            + Bytes.toStringBinary(splitKey) + ", " + Bytes.toStringBinary(lastKey));
        }
        lastKey = splitKey;
      }
    }

    CreateTableResponse response = executeCallable(
      new MasterCallable<CreateTableResponse>(getConnection(), getRpcControllerFactory()) {
        Long nonceGroup = ng.getNonceGroup();
        Long nonce = ng.newNonce();

        @Override
        protected CreateTableResponse rpcCall() throws Exception {
          setPriority(desc.getTableName());
          CreateTableRequest request =
            RequestConverter.buildCreateTableRequest(desc, splitKeys, nonceGroup, nonce);
          return master.createTable(getRpcController(), request);
        }
      });
    return new CreateTableFuture(this, desc, splitKeys, response);
  }

  private static class CreateTableFuture extends TableFuture<Void> {
    private final TableDescriptor desc;
    private final byte[][] splitKeys;

    public CreateTableFuture(final HBaseAdmin admin, final TableDescriptor desc,
      final byte[][] splitKeys, final CreateTableResponse response) {
      super(admin, desc.getTableName(),
        (response != null && response.hasProcId()) ? response.getProcId() : null);
      this.splitKeys = splitKeys;
      this.desc = desc;
    }

    @Override
    protected TableDescriptor getTableDescriptor() {
      return desc;
    }

    @Override
    public String getOperationType() {
      return "CREATE";
    }

    @Override
    protected Void waitOperationResult(final long deadlineTs) throws IOException, TimeoutException {
      waitForTableEnabled(deadlineTs);
      waitForAllRegionsOnline(deadlineTs, splitKeys);
      return null;
    }
  }

  @Override
  public Future<Void> deleteTableAsync(final TableName tableName) throws IOException {
    DeleteTableResponse response = executeCallable(
      new MasterCallable<DeleteTableResponse>(getConnection(), getRpcControllerFactory()) {
        Long nonceGroup = ng.getNonceGroup();
        Long nonce = ng.newNonce();

        @Override
        protected DeleteTableResponse rpcCall() throws Exception {
          setPriority(tableName);
          DeleteTableRequest req =
            RequestConverter.buildDeleteTableRequest(tableName, nonceGroup, nonce);
          return master.deleteTable(getRpcController(), req);
        }
      });
    return new DeleteTableFuture(this, tableName, response);
  }

  private static class DeleteTableFuture extends TableFuture<Void> {
    public DeleteTableFuture(final HBaseAdmin admin, final TableName tableName,
      final DeleteTableResponse response) {
      super(admin, tableName,
        (response != null && response.hasProcId()) ? response.getProcId() : null);
    }

    @Override
    public String getOperationType() {
      return "DELETE";
    }

    @Override
    protected Void waitOperationResult(final long deadlineTs) throws IOException, TimeoutException {
      waitTableNotFound(deadlineTs);
      return null;
    }

    @Override
    protected Void postOperationResult(final Void result, final long deadlineTs)
      throws IOException, TimeoutException {
      // Delete cached information to prevent clients from using old locations
      ((ClusterConnection) getAdmin().getConnection()).clearRegionCache(getTableName());
      return super.postOperationResult(result, deadlineTs);
    }
  }

  @Override
  public HTableDescriptor[] deleteTables(String regex) throws IOException {
    return deleteTables(Pattern.compile(regex));
  }

  /**
   * Delete tables matching the passed in pattern and wait on completion. Warning: Use this method
   * carefully, there is no prompting and the effect is immediate. Consider using
   * {@link #listTables(java.util.regex.Pattern) } and {@link #deleteTable(TableName)}
   * @param pattern The pattern to match table names against
   * @return Table descriptors for tables that couldn't be deleted
   * @throws IOException if a remote or network exception occurs
   */
  @Override
  public HTableDescriptor[] deleteTables(Pattern pattern) throws IOException {
    List<HTableDescriptor> failed = new ArrayList<>();
    for (HTableDescriptor table : listTables(pattern)) {
      try {
        deleteTable(table.getTableName());
      } catch (IOException ex) {
        LOG.info("Failed to delete table " + table.getTableName(), ex);
        failed.add(table);
      }
    }
    return failed.toArray(new HTableDescriptor[failed.size()]);
  }
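
  /*
   * Illustrative usage sketch (not part of the original file): deleteTable requires the target
   * table to be disabled first, so deleting a single table by name typically follows the pattern
   * below; "example_table" is a placeholder.
   *
   *   TableName tn = TableName.valueOf("example_table");
   *   if (admin.isTableEnabled(tn)) {
   *     admin.disableTable(tn);
   *   }
   *   admin.deleteTable(tn);
   */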

  @Override
  public Future<Void> truncateTableAsync(final TableName tableName, final boolean preserveSplits)
    throws IOException {
    TruncateTableResponse response = executeCallable(
      new MasterCallable<TruncateTableResponse>(getConnection(), getRpcControllerFactory()) {
        Long nonceGroup = ng.getNonceGroup();
        Long nonce = ng.newNonce();

        @Override
        protected TruncateTableResponse rpcCall() throws Exception {
          setPriority(tableName);
          LOG.info("Started truncating " + tableName);
          TruncateTableRequest req = RequestConverter.buildTruncateTableRequest(tableName,
            preserveSplits, nonceGroup, nonce);
          return master.truncateTable(getRpcController(), req);
        }
      });
    return new TruncateTableFuture(this, tableName, preserveSplits, response);
  }

  private static class TruncateTableFuture extends TableFuture<Void> {
    private final boolean preserveSplits;

    public TruncateTableFuture(final HBaseAdmin admin, final TableName tableName,
      final boolean preserveSplits, final TruncateTableResponse response) {
      super(admin, tableName,
        (response != null && response.hasProcId()) ? response.getProcId() : null);
      this.preserveSplits = preserveSplits;
    }

    @Override
    public String getOperationType() {
      return "TRUNCATE";
    }

    @Override
    protected Void waitOperationResult(final long deadlineTs) throws IOException, TimeoutException {
      waitForTableEnabled(deadlineTs);
      // once the table is enabled, we know the operation is done. so we can fetch the splitKeys
      byte[][] splitKeys =
        preserveSplits ? getAdmin().getTableSplits(getTableName()) : null;
      waitForAllRegionsOnline(deadlineTs, splitKeys);
      return null;
    }
  }

  private byte[][] getTableSplits(final TableName tableName) throws IOException {
    byte[][] splits = null;
    try (RegionLocator locator = getConnection().getRegionLocator(tableName)) {
      byte[][] startKeys = locator.getStartKeys();
      if (startKeys.length == 1) {
        return splits;
      }
      splits = new byte[startKeys.length - 1][];
      for (int i = 1; i < startKeys.length; i++) {
        splits[i - 1] = startKeys[i];
      }
    }
    return splits;
  }

  @Override
  public Future<Void> enableTableAsync(final TableName tableName) throws IOException {
    TableName.isLegalFullyQualifiedTableName(tableName.getName());
    EnableTableResponse response = executeCallable(
      new MasterCallable<EnableTableResponse>(getConnection(), getRpcControllerFactory()) {
        Long nonceGroup = ng.getNonceGroup();
        Long nonce = ng.newNonce();

        @Override
        protected EnableTableResponse rpcCall() throws Exception {
          setPriority(tableName);
          LOG.info("Started enable of " + tableName);
          EnableTableRequest req =
            RequestConverter.buildEnableTableRequest(tableName, nonceGroup, nonce);
          return master.enableTable(getRpcController(), req);
        }
      });
    return new EnableTableFuture(this, tableName, response);
  }

  private static class EnableTableFuture extends TableFuture<Void> {
    public EnableTableFuture(final HBaseAdmin admin, final TableName tableName,
      final EnableTableResponse response) {
      super(admin, tableName,
        (response != null && response.hasProcId()) ? response.getProcId() : null);
    }

    @Override
    public String getOperationType() {
      return "ENABLE";
    }

    @Override
    protected Void waitOperationResult(final long deadlineTs) throws IOException, TimeoutException {
      waitForTableEnabled(deadlineTs);
      return null;
    }
  }

  @Override
  public HTableDescriptor[] enableTables(String regex) throws IOException {
    return enableTables(Pattern.compile(regex));
  }

  @Override
  public HTableDescriptor[] enableTables(Pattern pattern) throws IOException {
    List<HTableDescriptor> failed = new ArrayList<>();
    for (HTableDescriptor table : listTables(pattern)) {
      if (isTableDisabled(table.getTableName())) {
        try {
          enableTable(table.getTableName());
        } catch (IOException ex) {
          LOG.info("Failed to enable table " + table.getTableName(), ex);
          failed.add(table);
        }
      }
    }
    return failed.toArray(new HTableDescriptor[failed.size()]);
  }

  @Override
  public Future<Void> disableTableAsync(final TableName tableName) throws IOException {
    TableName.isLegalFullyQualifiedTableName(tableName.getName());
    DisableTableResponse response = executeCallable(
      new MasterCallable<DisableTableResponse>(getConnection(), getRpcControllerFactory()) {
        Long nonceGroup = ng.getNonceGroup();
        Long nonce = ng.newNonce();

        @Override
        protected DisableTableResponse rpcCall() throws Exception {
          setPriority(tableName);
          LOG.info("Started disable of " + tableName);
          DisableTableRequest req =
            RequestConverter.buildDisableTableRequest(tableName, nonceGroup, nonce);
          return master.disableTable(getRpcController(), req);
        }
      });
    return new DisableTableFuture(this, tableName, response);
  }

  private static class DisableTableFuture extends TableFuture<Void> {
    public DisableTableFuture(final HBaseAdmin admin, final TableName tableName,
      final DisableTableResponse response) {
      super(admin, tableName,
        (response != null && response.hasProcId()) ? response.getProcId() : null);
    }

    @Override
    public String getOperationType() {
      return "DISABLE";
    }

    @Override
    protected Void waitOperationResult(long deadlineTs) throws IOException, TimeoutException {
      waitForTableDisabled(deadlineTs);
      return null;
    }
  }

  @Override
  public HTableDescriptor[] disableTables(String regex) throws IOException {
    return disableTables(Pattern.compile(regex));
  }

  @Override
  public HTableDescriptor[] disableTables(Pattern pattern) throws IOException {
    List<HTableDescriptor> failed = new ArrayList<>();
    for (HTableDescriptor table : listTables(pattern)) {
      if (isTableEnabled(table.getTableName())) {
        try {
          disableTable(table.getTableName());
        } catch (IOException ex) {
          LOG.info("Failed to disable table " + table.getTableName(), ex);
          failed.add(table);
        }
      }
    }
    return failed.toArray(new HTableDescriptor[failed.size()]);
  }

  @Override
  public boolean isTableEnabled(final TableName tableName) throws IOException {
    checkTableExists(tableName);
    return executeCallable(new RpcRetryingCallable<Boolean>() {
      @Override
      protected Boolean rpcCall(int callTimeout) throws Exception {
        TableState tableState = MetaTableAccessor.getTableState(getConnection(), tableName);
        if (tableState == null) {
          throw new TableNotFoundException(tableName);
        }
        return tableState.inStates(TableState.State.ENABLED);
      }
    });
  }

  @Override
  public boolean isTableDisabled(TableName tableName) throws IOException {
    checkTableExists(tableName);
    return connection.isTableDisabled(tableName);
  }

  @Override
  public boolean isTableAvailable(TableName tableName) throws IOException {
    return connection.isTableAvailable(tableName, null);
  }

  @Override
  public boolean isTableAvailable(TableName tableName, byte[][] splitKeys) throws IOException {
    return connection.isTableAvailable(tableName, splitKeys);
  }

  @Override
  public Pair<Integer, Integer> getAlterStatus(final TableName tableName) throws IOException {
    return executeCallable(
      new MasterCallable<Pair<Integer, Integer>>(getConnection(), getRpcControllerFactory()) {
        @Override
        protected Pair<Integer, Integer> rpcCall() throws Exception {
          setPriority(tableName);
          GetSchemaAlterStatusRequest req =
            RequestConverter.buildGetSchemaAlterStatusRequest(tableName);
          GetSchemaAlterStatusResponse ret = master.getSchemaAlterStatus(getRpcController(), req);
          Pair<Integer, Integer> pair =
            new Pair<>(ret.getYetToUpdateRegions(), ret.getTotalRegions());
          return pair;
        }
      });
  }

  @Override
  public Pair<Integer, Integer> getAlterStatus(final byte[] tableName) throws IOException {
    return getAlterStatus(TableName.valueOf(tableName));
  }

  @Override
  public Future<Void> addColumnFamilyAsync(final TableName tableName,
    final ColumnFamilyDescriptor columnFamily) throws IOException {
    AddColumnResponse response = executeCallable(
      new MasterCallable<AddColumnResponse>(getConnection(), getRpcControllerFactory()) {
        Long nonceGroup = ng.getNonceGroup();
        Long nonce = ng.newNonce();

        @Override
        protected AddColumnResponse rpcCall() throws Exception {
          setPriority(tableName);
          AddColumnRequest req =
            RequestConverter.buildAddColumnRequest(tableName, columnFamily, nonceGroup, nonce);
          return master.addColumn(getRpcController(), req);
        }
      });
    return new AddColumnFamilyFuture(this, tableName, response);
  }

  private static class AddColumnFamilyFuture extends ModifyTableFuture {
    public AddColumnFamilyFuture(final HBaseAdmin admin, final TableName tableName,
      final AddColumnResponse response) {
      super(admin, tableName,
        (response != null && response.hasProcId()) ? response.getProcId() : null);
    }

    @Override
    public String getOperationType() {
      return "ADD_COLUMN_FAMILY";
    }
  }

  /**
   * {@inheritDoc}
   * @deprecated Since 2.0. Will be removed in 3.0. Use
   *             {@link #deleteColumnFamily(TableName, byte[])} instead.
   */
  @Override
  @Deprecated
  public void deleteColumn(final TableName tableName, final byte[] columnFamily)
    throws IOException {
    deleteColumnFamily(tableName, columnFamily);
  }

  @Override
  public Future<Void> deleteColumnFamilyAsync(final TableName tableName, final byte[] columnFamily)
    throws IOException {
    DeleteColumnResponse response = executeCallable(
      new MasterCallable<DeleteColumnResponse>(getConnection(), getRpcControllerFactory()) {
        Long nonceGroup = ng.getNonceGroup();
        Long nonce = ng.newNonce();

        @Override
        protected DeleteColumnResponse rpcCall() throws Exception {
          setPriority(tableName);
          DeleteColumnRequest req =
            RequestConverter.buildDeleteColumnRequest(tableName, columnFamily, nonceGroup, nonce);
          return master.deleteColumn(getRpcController(), req);
        }
      });
    return new DeleteColumnFamilyFuture(this, tableName, response);
  }

  private static class DeleteColumnFamilyFuture extends ModifyTableFuture {
    public DeleteColumnFamilyFuture(final HBaseAdmin admin, final TableName tableName,
      final DeleteColumnResponse response) {
      super(admin, tableName,
        (response != null && response.hasProcId()) ? response.getProcId() : null);
    }

    @Override
    public String getOperationType() {
      return "DELETE_COLUMN_FAMILY";
    }
  }

  @Override
  public Future<Void> modifyColumnFamilyAsync(final TableName tableName,
    final ColumnFamilyDescriptor columnFamily) throws IOException {
    ModifyColumnResponse response = executeCallable(
      new MasterCallable<ModifyColumnResponse>(getConnection(), getRpcControllerFactory()) {
        long nonceGroup = ng.getNonceGroup();
        long nonce = ng.newNonce();

        @Override
        protected ModifyColumnResponse rpcCall() throws Exception {
          setPriority(tableName);
          ModifyColumnRequest req =
            RequestConverter.buildModifyColumnRequest(tableName, columnFamily, nonceGroup, nonce);
          return master.modifyColumn(getRpcController(), req);
        }
      });
    return new ModifyColumnFamilyFuture(this, tableName, response);
  }

  private static class ModifyColumnFamilyFuture extends ModifyTableFuture {
    public ModifyColumnFamilyFuture(final HBaseAdmin admin, final TableName tableName,
      final ModifyColumnResponse response) {
      super(admin, tableName,
        (response != null && response.hasProcId()) ? response.getProcId() : null);
    }

    @Override
    public String getOperationType() {
      return "MODIFY_COLUMN_FAMILY";
    }
  }

  @Override
  public Future<Void> modifyColumnFamilyStoreFileTrackerAsync(TableName tableName, byte[] family,
    String dstSFT) throws IOException {
    ModifyColumnStoreFileTrackerResponse response =
      executeCallable(new MasterCallable<ModifyColumnStoreFileTrackerResponse>(getConnection(),
        getRpcControllerFactory()) {
        long nonceGroup = ng.getNonceGroup();
        long nonce = ng.newNonce();

        @Override
        protected ModifyColumnStoreFileTrackerResponse rpcCall() throws Exception {
          setPriority(tableName);
          ModifyColumnStoreFileTrackerRequest req = RequestConverter
            .buildModifyColumnStoreFileTrackerRequest(tableName, family, dstSFT, nonceGroup, nonce);
          return master.modifyColumnStoreFileTracker(getRpcController(), req);
        }
      });
    return new ModifyColumnFamilyStoreFileTrackerFuture(this, tableName, response);
  }

  private static class ModifyColumnFamilyStoreFileTrackerFuture extends ModifyTableFuture {
    public ModifyColumnFamilyStoreFileTrackerFuture(HBaseAdmin admin, TableName tableName,
      final ModifyColumnStoreFileTrackerResponse response) {
      super(admin, tableName,
        (response != null && response.hasProcId()) ? response.getProcId() : null);
    }

    @Override
    public String getOperationType() {
      return "MODIFY_COLUMN_FAMILY_STORE_FILE_TRACKER";
    }
  }

  @Deprecated
  @Override
  public void closeRegion(final String regionName, final String unused) throws IOException {
    unassign(Bytes.toBytes(regionName), true);
  }

  @Deprecated
  @Override
  public void closeRegion(final byte[] regionName, final String unused) throws IOException {
    unassign(regionName, true);
  }

  @Deprecated
  @Override
  public boolean closeRegionWithEncodedRegionName(final String encodedRegionName,
    final String unused) throws IOException {
    unassign(Bytes.toBytes(encodedRegionName), true);
    return true;
  }

  @Deprecated
  @Override
  public void closeRegion(final ServerName unused, final HRegionInfo hri) throws IOException {
    unassign(hri.getRegionName(), true);
  }

  /**
   * @return List of {@link HRegionInfo}.
   * @throws IOException if a remote or network exception occurs
   * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 Use
   *             {@link #getRegions(ServerName)}.
   */
  @Deprecated
  @Override
  public List<HRegionInfo> getOnlineRegions(final ServerName sn) throws IOException {
    return getRegions(sn).stream().map(ImmutableHRegionInfo::new).collect(Collectors.toList());
  }

  @Override
  public void flush(final TableName tableName) throws IOException {
    flush(tableName, Collections.emptyList());
  }

  @Override
  public void flush(final TableName tableName, byte[] columnFamily) throws IOException {
    flush(tableName, Collections.singletonList(columnFamily));
  }

  @Override
  public Future<Void> flushAsync(TableName tableName, List<byte[]> columnFamilies)
    throws IOException {
    // check if the table exists and is enabled
    if (!isTableEnabled(tableName)) {
      throw new TableNotEnabledException(tableName.getNameAsString());
    }
    // remove duplicate column families
    List<byte[]> columnFamilyList = columnFamilies.stream()
      .filter(cf -> cf != null && cf.length > 0).distinct().collect(Collectors.toList());

    try {
      FlushTableResponse resp = executeCallable(
        new MasterCallable<FlushTableResponse>(getConnection(), getRpcControllerFactory()) {
          final long nonceGroup = ng.getNonceGroup();
          final long nonce = ng.newNonce();

          @Override
          protected FlushTableResponse rpcCall() throws Exception {
            FlushTableRequest request = RequestConverter.buildFlushTableRequest(tableName,
              columnFamilyList, nonceGroup, nonce);
            return master.flushTable(getRpcController(), request);
          }
        });
      return new FlushTableFuture(this, tableName, resp);
    } catch (DoNotRetryIOException e) {
      // This is for keeping compatibility with the old implementation.
      // Usually the exception means the method is not present on the server, or
      // the hbase hadoop version does not match the running hadoop version, or
      // the FlushTableProcedure is disabled. If that happens, we need to fall back
      // to the old flush implementation.
      Map<String, String> props = new HashMap<>();
      if (!columnFamilies.isEmpty()) {
        props.put(HConstants.FAMILY_KEY_STR, Strings.JOINER
          .join(columnFamilies.stream().map(Bytes::toString).collect(Collectors.toList())));
      }

      executeCallable(
        new MasterCallable<ExecProcedureResponse>(getConnection(), getRpcControllerFactory()) {
          @Override
          protected ExecProcedureResponse rpcCall() throws Exception {
            ExecProcedureRequest request = ExecProcedureRequest.newBuilder()
              .setProcedure(ProtobufUtil.buildProcedureDescription("flush-table-proc",
                tableName.getNameAsString(), props))
              .build();
            return master.execProcedure(getRpcController(), request);
          }
        });
      return new LegacyFlushFuture(this, tableName, props);
    }
  }

  private static class FlushTableFuture extends TableFuture<Void> {

    public FlushTableFuture(final HBaseAdmin admin, final TableName tableName,
      final FlushTableResponse resp) {
resp.getProcId() : null); 1316 } 1317 1318 @Override 1319 public String getOperationType() { 1320 return "FLUSH"; 1321 } 1322 } 1323 1324 private static class LegacyFlushFuture extends TableFuture<Void> { 1325 1326 private final Map<String, String> props; 1327 1328 public LegacyFlushFuture(HBaseAdmin admin, TableName tableName, Map<String, String> props) { 1329 super(admin, tableName, null); 1330 this.props = props; 1331 } 1332 1333 @Override 1334 public String getOperationType() { 1335 return "FLUSH"; 1336 } 1337 1338 @Override 1339 protected Void waitOperationResult(long deadlineTs) throws IOException, TimeoutException { 1340 waitForState(deadlineTs, new TableWaitForStateCallable() { 1341 @Override 1342 public boolean checkState(int tries) throws IOException { 1343 return getAdmin().isProcedureFinished("flush-table-proc", 1344 getTableName().getNameAsString(), props); 1345 } 1346 }); 1347 return null; 1348 } 1349 } 1350 1351 @Override 1352 public void flushRegion(final byte[] regionName) throws IOException { 1353 flushRegion(regionName, null); 1354 } 1355 1356 @Override 1357 public void flushRegion(final byte[] regionName, byte[] columnFamily) throws IOException { 1358 Pair<RegionInfo, ServerName> regionServerPair = getRegion(regionName); 1359 if (regionServerPair == null) { 1360 throw new IllegalArgumentException("Unknown regionname: " + Bytes.toStringBinary(regionName)); 1361 } 1362 if (regionServerPair.getSecond() == null) { 1363 throw new NoServerForRegionException(Bytes.toStringBinary(regionName)); 1364 } 1365 final RegionInfo regionInfo = regionServerPair.getFirst(); 1366 ServerName serverName = regionServerPair.getSecond(); 1367 flush(this.connection.getAdmin(serverName), regionInfo, columnFamily); 1368 } 1369 1370 private void flush(AdminService.BlockingInterface admin, final RegionInfo info, 1371 byte[] columnFamily) throws IOException { 1372 ProtobufUtil.call(() -> { 1373 // TODO: There is no timeout on this controller. Set one! 
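// A possible way to address the TODO above (illustrative sketch, not part of the original
// code): once the controller is created on the next line, bound the RPC with the admin's
// configured operation timeout, for example:
//   controller.setCallTimeout(getOperationTimeout());
// HBaseRpcController#setCallTimeout takes milliseconds; whether getOperationTimeout() is the
// right budget for a per-region flush call is an assumption.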
1374 HBaseRpcController controller = rpcControllerFactory.newController(); 1375 FlushRegionRequest request = 1376 RequestConverter.buildFlushRegionRequest(info.getRegionName(), columnFamily, false); 1377 admin.flushRegion(controller, request); 1378 return null; 1379 }); 1380 } 1381 1382 @Override 1383 public void flushRegionServer(ServerName serverName) throws IOException { 1384 for (RegionInfo region : getRegions(serverName)) { 1385 flush(this.connection.getAdmin(serverName), region, null); 1386 } 1387 } 1388 1389 /** 1390 * {@inheritDoc} 1391 */ 1392 @Override 1393 public void compact(final TableName tableName) throws IOException { 1394 compact(tableName, null, false, CompactType.NORMAL); 1395 } 1396 1397 @Override 1398 public void compactRegion(final byte[] regionName) throws IOException { 1399 compactRegion(regionName, null, false); 1400 } 1401 1402 /** 1403 * {@inheritDoc} 1404 */ 1405 @Override 1406 public void compact(final TableName tableName, final byte[] columnFamily) throws IOException { 1407 compact(tableName, columnFamily, false, CompactType.NORMAL); 1408 } 1409 1410 /** 1411 * {@inheritDoc} 1412 */ 1413 @Override 1414 public void compactRegion(final byte[] regionName, final byte[] columnFamily) throws IOException { 1415 compactRegion(regionName, columnFamily, false); 1416 } 1417 1418 @Override 1419 public Map<ServerName, Boolean> compactionSwitch(boolean switchState, 1420 List<String> serverNamesList) throws IOException { 1421 List<ServerName> serverList = new ArrayList<>(); 1422 if (serverNamesList.isEmpty()) { 1423 ClusterMetrics status = getClusterMetrics(EnumSet.of(Option.LIVE_SERVERS)); 1424 serverList.addAll(status.getLiveServerMetrics().keySet()); 1425 } else { 1426 for (String regionServerName : serverNamesList) { 1427 ServerName serverName = null; 1428 try { 1429 serverName = ServerName.valueOf(regionServerName); 1430 } catch (Exception e) { 1431 throw new IllegalArgumentException( 1432 String.format("Invalid ServerName format: %s", regionServerName)); 1433 } 1434 if (serverName == null) { 1435 throw new IllegalArgumentException( 1436 String.format("Null ServerName: %s", regionServerName)); 1437 } 1438 serverList.add(serverName); 1439 } 1440 } 1441 Map<ServerName, Boolean> res = new HashMap<>(serverList.size()); 1442 for (ServerName serverName : serverList) { 1443 boolean prev_state = switchCompact(this.connection.getAdmin(serverName), switchState); 1444 res.put(serverName, prev_state); 1445 } 1446 return res; 1447 } 1448 1449 private Boolean switchCompact(AdminService.BlockingInterface admin, boolean onOrOff) 1450 throws IOException { 1451 return executeCallable(new RpcRetryingCallable<Boolean>() { 1452 @Override 1453 protected Boolean rpcCall(int callTimeout) throws Exception { 1454 HBaseRpcController controller = rpcControllerFactory.newController(); 1455 CompactionSwitchRequest request = 1456 CompactionSwitchRequest.newBuilder().setEnabled(onOrOff).build(); 1457 CompactionSwitchResponse compactionSwitchResponse = 1458 admin.compactionSwitch(controller, request); 1459 return compactionSwitchResponse.getPrevState(); 1460 } 1461 }); 1462 } 1463 1464 @Override 1465 public void compactRegionServer(final ServerName serverName) throws IOException { 1466 for (RegionInfo region : getRegions(serverName)) { 1467 compact(this.connection.getAdmin(serverName), region, false, null); 1468 } 1469 } 1470 1471 @Override 1472 public void majorCompactRegionServer(final ServerName serverName) throws IOException { 1473 for (RegionInfo region : getRegions(serverName)) { 1474 
compact(this.connection.getAdmin(serverName), region, true, null); 1475 } 1476 } 1477 1478 @Override 1479 public void majorCompact(final TableName tableName) throws IOException { 1480 compact(tableName, null, true, CompactType.NORMAL); 1481 } 1482 1483 @Override 1484 public void majorCompactRegion(final byte[] regionName) throws IOException { 1485 compactRegion(regionName, null, true); 1486 } 1487 1488 /** 1489 * {@inheritDoc} 1490 */ 1491 @Override 1492 public void majorCompact(final TableName tableName, final byte[] columnFamily) 1493 throws IOException { 1494 compact(tableName, columnFamily, true, CompactType.NORMAL); 1495 } 1496 1497 @Override 1498 public void majorCompactRegion(final byte[] regionName, final byte[] columnFamily) 1499 throws IOException { 1500 compactRegion(regionName, columnFamily, true); 1501 } 1502 1503 /** 1504 * Compact a table. Asynchronous operation. 1505 * @param tableName table or region to compact 1506 * @param columnFamily column family within a table or region 1507 * @param major True if we are to do a major compaction. 1508 * @param compactType {@link org.apache.hadoop.hbase.client.CompactType} 1509 * @throws IOException if a remote or network exception occurs 1510 */ 1511 private void compact(final TableName tableName, final byte[] columnFamily, final boolean major, 1512 CompactType compactType) throws IOException { 1513 switch (compactType) { 1514 case MOB: 1515 compact(this.connection.getAdminForMaster(), RegionInfo.createMobRegionInfo(tableName), 1516 major, columnFamily); 1517 break; 1518 case NORMAL: 1519 checkTableExists(tableName); 1520 for (HRegionLocation loc : connection.locateRegions(tableName, false, false)) { 1521 ServerName sn = loc.getServerName(); 1522 if (sn == null) { 1523 continue; 1524 } 1525 try { 1526 compact(this.connection.getAdmin(sn), loc.getRegion(), major, columnFamily); 1527 } catch (NotServingRegionException e) { 1528 if (LOG.isDebugEnabled()) { 1529 LOG.debug("Trying to" + (major ? " major" : "") + " compact " + loc.getRegion() + ": " 1530 + StringUtils.stringifyException(e)); 1531 } 1532 } 1533 } 1534 break; 1535 default: 1536 throw new IllegalArgumentException("Unknown compactType: " + compactType); 1537 } 1538 } 1539 1540 /** 1541 * Compact an individual region. Asynchronous operation. 1542 * @param regionName region to compact 1543 * @param columnFamily column family within a table or region 1544 * @param major True if we are to do a major compaction. 1545 * @throws IOException if a remote or network exception occurs 1546 */ 1547 private void compactRegion(final byte[] regionName, final byte[] columnFamily, 1548 final boolean major) throws IOException { 1549 Pair<RegionInfo, ServerName> regionServerPair = getRegion(regionName); 1550 if (regionServerPair == null) { 1551 throw new IllegalArgumentException("Invalid region: " + Bytes.toStringBinary(regionName)); 1552 } 1553 if (regionServerPair.getSecond() == null) { 1554 throw new NoServerForRegionException(Bytes.toStringBinary(regionName)); 1555 } 1556 compact(this.connection.getAdmin(regionServerPair.getSecond()), regionServerPair.getFirst(), 1557 major, columnFamily); 1558 } 1559 1560 private void compact(AdminService.BlockingInterface admin, RegionInfo hri, boolean major, 1561 byte[] family) throws IOException { 1562 Callable<Void> callable = new Callable<Void>() { 1563 @Override 1564 public Void call() throws Exception { 1565 // TODO: There is no timeout on this controller. Set one! 
1566 HBaseRpcController controller = rpcControllerFactory.newController(); 1567 CompactRegionRequest request = 1568 RequestConverter.buildCompactRegionRequest(hri.getRegionName(), major, family); 1569 admin.compactRegion(controller, request); 1570 return null; 1571 } 1572 }; 1573 ProtobufUtil.call(callable); 1574 } 1575 1576 @Override 1577 public void move(byte[] encodedRegionName) throws IOException { 1578 move(encodedRegionName, (ServerName) null); 1579 } 1580 1581 @Override 1582 public void move(final byte[] encodedRegionName, ServerName destServerName) throws IOException { 1583 executeCallable(new MasterCallable<Void>(getConnection(), getRpcControllerFactory()) { 1584 @Override 1585 protected Void rpcCall() throws Exception { 1586 setPriority(encodedRegionName); 1587 MoveRegionRequest request = 1588 RequestConverter.buildMoveRegionRequest(encodedRegionName, destServerName); 1589 master.moveRegion(getRpcController(), request); 1590 return null; 1591 } 1592 }); 1593 } 1594 1595 @Override 1596 public void assign(final byte[] regionName) 1597 throws MasterNotRunningException, ZooKeeperConnectionException, IOException { 1598 executeCallable(new MasterCallable<Void>(getConnection(), getRpcControllerFactory()) { 1599 @Override 1600 protected Void rpcCall() throws Exception { 1601 setPriority(regionName); 1602 AssignRegionRequest request = 1603 RequestConverter.buildAssignRegionRequest(getRegionName(regionName)); 1604 master.assignRegion(getRpcController(), request); 1605 return null; 1606 } 1607 }); 1608 } 1609 1610 @Override 1611 public void unassign(final byte[] regionName) throws IOException { 1612 final byte[] toBeUnassigned = getRegionName(regionName); 1613 executeCallable(new MasterCallable<Void>(getConnection(), getRpcControllerFactory()) { 1614 @Override 1615 protected Void rpcCall() throws Exception { 1616 setPriority(regionName); 1617 UnassignRegionRequest request = RequestConverter.buildUnassignRegionRequest(toBeUnassigned); 1618 master.unassignRegion(getRpcController(), request); 1619 return null; 1620 } 1621 }); 1622 } 1623 1624 @Override 1625 public void offline(final byte[] regionName) throws IOException { 1626 executeCallable(new MasterCallable<Void>(getConnection(), getRpcControllerFactory()) { 1627 @Override 1628 protected Void rpcCall() throws Exception { 1629 setPriority(regionName); 1630 master.offlineRegion(getRpcController(), 1631 RequestConverter.buildOfflineRegionRequest(regionName)); 1632 return null; 1633 } 1634 }); 1635 } 1636 1637 @Override 1638 public boolean balancerSwitch(final boolean on, final boolean synchronous) throws IOException { 1639 return executeCallable(new MasterCallable<Boolean>(getConnection(), getRpcControllerFactory()) { 1640 @Override 1641 protected Boolean rpcCall() throws Exception { 1642 SetBalancerRunningRequest req = 1643 RequestConverter.buildSetBalancerRunningRequest(on, synchronous); 1644 return master.setBalancerRunning(getRpcController(), req).getPrevBalanceValue(); 1645 } 1646 }); 1647 } 1648 1649 @Override 1650 public BalanceResponse balance(BalanceRequest request) throws IOException { 1651 return executeCallable( 1652 new MasterCallable<BalanceResponse>(getConnection(), getRpcControllerFactory()) { 1653 @Override 1654 protected BalanceResponse rpcCall() throws Exception { 1655 MasterProtos.BalanceRequest req = ProtobufUtil.toBalanceRequest(request); 1656 return ProtobufUtil.toBalanceResponse(master.balance(getRpcController(), req)); 1657 } 1658 }); 1659 } 1660 1661 @Override 1662 public boolean isBalancerEnabled() throws 
IOException { 1663 return executeCallable(new MasterCallable<Boolean>(getConnection(), getRpcControllerFactory()) { 1664 @Override 1665 protected Boolean rpcCall() throws Exception { 1666 return master 1667 .isBalancerEnabled(getRpcController(), RequestConverter.buildIsBalancerEnabledRequest()) 1668 .getEnabled(); 1669 } 1670 }); 1671 } 1672 1673 /** 1674 * {@inheritDoc} 1675 */ 1676 @Override 1677 public CacheEvictionStats clearBlockCache(final TableName tableName) throws IOException { 1678 checkTableExists(tableName); 1679 CacheEvictionStatsBuilder cacheEvictionStats = CacheEvictionStats.builder(); 1680 List<Pair<RegionInfo, ServerName>> pairs = 1681 MetaTableAccessor.getTableRegionsAndLocations(connection, tableName); 1682 Map<ServerName, 1683 List<RegionInfo>> regionInfoByServerName = pairs.stream() 1684 .filter(pair -> !pair.getFirst().isOffline()).filter(pair -> pair.getSecond() != null) 1685 .collect(Collectors.groupingBy(pair -> pair.getSecond(), 1686 Collectors.mapping(pair -> pair.getFirst(), Collectors.toList()))); 1687 1688 for (Map.Entry<ServerName, List<RegionInfo>> entry : regionInfoByServerName.entrySet()) { 1689 CacheEvictionStats stats = clearBlockCache(entry.getKey(), entry.getValue()); 1690 cacheEvictionStats = cacheEvictionStats.append(stats); 1691 if (stats.getExceptionCount() > 0) { 1692 for (Map.Entry<byte[], Throwable> exception : stats.getExceptions().entrySet()) { 1693 LOG.debug("Failed to clear block cache for " + Bytes.toStringBinary(exception.getKey()) 1694 + " on " + entry.getKey() + ": ", exception.getValue()); 1695 } 1696 } 1697 } 1698 return cacheEvictionStats.build(); 1699 } 1700 1701 private CacheEvictionStats clearBlockCache(final ServerName sn, final List<RegionInfo> hris) 1702 throws IOException { 1703 HBaseRpcController controller = rpcControllerFactory.newController(); 1704 AdminService.BlockingInterface admin = this.connection.getAdmin(sn); 1705 ClearRegionBlockCacheRequest request = RequestConverter.buildClearRegionBlockCacheRequest(hris); 1706 ClearRegionBlockCacheResponse response; 1707 try { 1708 response = admin.clearRegionBlockCache(controller, request); 1709 return ProtobufUtil.toCacheEvictionStats(response.getStats()); 1710 } catch (ServiceException se) { 1711 throw ProtobufUtil.getRemoteException(se); 1712 } 1713 } 1714 1715 @Override 1716 public boolean normalize(NormalizeTableFilterParams ntfp) throws IOException { 1717 return executeCallable(new MasterCallable<Boolean>(getConnection(), getRpcControllerFactory()) { 1718 @Override 1719 protected Boolean rpcCall() throws Exception { 1720 return master.normalize(getRpcController(), RequestConverter.buildNormalizeRequest(ntfp)) 1721 .getNormalizerRan(); 1722 } 1723 }); 1724 } 1725 1726 @Override 1727 public boolean isNormalizerEnabled() throws IOException { 1728 return executeCallable(new MasterCallable<Boolean>(getConnection(), getRpcControllerFactory()) { 1729 @Override 1730 protected Boolean rpcCall() throws Exception { 1731 return master.isNormalizerEnabled(getRpcController(), 1732 RequestConverter.buildIsNormalizerEnabledRequest()).getEnabled(); 1733 } 1734 }); 1735 } 1736 1737 @Override 1738 public boolean normalizerSwitch(final boolean on) throws IOException { 1739 return executeCallable(new MasterCallable<Boolean>(getConnection(), getRpcControllerFactory()) { 1740 @Override 1741 protected Boolean rpcCall() throws Exception { 1742 SetNormalizerRunningRequest req = RequestConverter.buildSetNormalizerRunningRequest(on); 1743 return master.setNormalizerRunning(getRpcController(), 
req).getPrevNormalizerValue(); 1744 } 1745 }); 1746 } 1747 1748 @Override 1749 public boolean catalogJanitorSwitch(final boolean enable) throws IOException { 1750 return executeCallable(new MasterCallable<Boolean>(getConnection(), getRpcControllerFactory()) { 1751 @Override 1752 protected Boolean rpcCall() throws Exception { 1753 return master.enableCatalogJanitor(getRpcController(), 1754 RequestConverter.buildEnableCatalogJanitorRequest(enable)).getPrevValue(); 1755 } 1756 }); 1757 } 1758 1759 @Override 1760 public int runCatalogJanitor() throws IOException { 1761 return executeCallable(new MasterCallable<Integer>(getConnection(), getRpcControllerFactory()) { 1762 @Override 1763 protected Integer rpcCall() throws Exception { 1764 return master.runCatalogScan(getRpcController(), RequestConverter.buildCatalogScanRequest()) 1765 .getScanResult(); 1766 } 1767 }); 1768 } 1769 1770 @Override 1771 public boolean isCatalogJanitorEnabled() throws IOException { 1772 return executeCallable(new MasterCallable<Boolean>(getConnection(), getRpcControllerFactory()) { 1773 @Override 1774 protected Boolean rpcCall() throws Exception { 1775 return master.isCatalogJanitorEnabled(getRpcController(), 1776 RequestConverter.buildIsCatalogJanitorEnabledRequest()).getValue(); 1777 } 1778 }); 1779 } 1780 1781 @Override 1782 public boolean cleanerChoreSwitch(final boolean on) throws IOException { 1783 return executeCallable(new MasterCallable<Boolean>(getConnection(), getRpcControllerFactory()) { 1784 @Override 1785 public Boolean rpcCall() throws Exception { 1786 return master.setCleanerChoreRunning(getRpcController(), 1787 RequestConverter.buildSetCleanerChoreRunningRequest(on)).getPrevValue(); 1788 } 1789 }); 1790 } 1791 1792 @Override 1793 public boolean runCleanerChore() throws IOException { 1794 return executeCallable(new MasterCallable<Boolean>(getConnection(), getRpcControllerFactory()) { 1795 @Override 1796 public Boolean rpcCall() throws Exception { 1797 return master 1798 .runCleanerChore(getRpcController(), RequestConverter.buildRunCleanerChoreRequest()) 1799 .getCleanerChoreRan(); 1800 } 1801 }); 1802 } 1803 1804 @Override 1805 public boolean isCleanerChoreEnabled() throws IOException { 1806 return executeCallable(new MasterCallable<Boolean>(getConnection(), getRpcControllerFactory()) { 1807 @Override 1808 public Boolean rpcCall() throws Exception { 1809 return master.isCleanerChoreEnabled(getRpcController(), 1810 RequestConverter.buildIsCleanerChoreEnabledRequest()).getValue(); 1811 } 1812 }); 1813 } 1814 1815 /** 1816 * Merge two regions. Synchronous operation. Note: It is not feasible to predict the length of 1817 * merge. Therefore, this is for internal testing only. 1818 * @param nameOfRegionA encoded or full name of region a 1819 * @param nameOfRegionB encoded or full name of region b 1820 * @param forcible true if do a compulsory merge, otherwise we will only merge two adjacent 1821 * regions 1822 * @throws IOException if a remote or network exception occurs 1823 */ 1824 public void mergeRegionsSync(final byte[] nameOfRegionA, final byte[] nameOfRegionB, 1825 final boolean forcible) throws IOException { 1826 get(mergeRegionsAsync(nameOfRegionA, nameOfRegionB, forcible), syncWaitTimeout, 1827 TimeUnit.MILLISECONDS); 1828 } 1829 1830 /** 1831 * Merge two regions. Asynchronous operation. 
1832 * @param nameOfRegionA encoded or full name of region a 1833 * @param nameOfRegionB encoded or full name of region b 1834 * @param forcible true if do a compulsory merge, otherwise we will only merge two adjacent 1835 * regions 1836 * @throws IOException if a remote or network exception occurs 1837 * @deprecated Since 2.0. Will be removed in 3.0. Use 1838 * {@link #mergeRegionsAsync(byte[], byte[], boolean)} instead. 1839 */ 1840 @Deprecated 1841 @Override 1842 public void mergeRegions(final byte[] nameOfRegionA, final byte[] nameOfRegionB, 1843 final boolean forcible) throws IOException { 1844 mergeRegionsAsync(nameOfRegionA, nameOfRegionB, forcible); 1845 } 1846 1847 /** 1848 * Merge two regions. Asynchronous operation. 1849 * @param nameofRegionsToMerge encoded or full name of daughter regions 1850 * @param forcible true if do a compulsory merge, otherwise we will only merge 1851 * adjacent regions 1852 */ 1853 @Override 1854 public Future<Void> mergeRegionsAsync(final byte[][] nameofRegionsToMerge, final boolean forcible) 1855 throws IOException { 1856 Preconditions.checkArgument(nameofRegionsToMerge.length >= 2, "Can not merge only %s region", 1857 nameofRegionsToMerge.length); 1858 byte[][] encodedNameofRegionsToMerge = new byte[nameofRegionsToMerge.length][]; 1859 for (int i = 0; i < nameofRegionsToMerge.length; i++) { 1860 encodedNameofRegionsToMerge[i] = RegionInfo.isEncodedRegionName(nameofRegionsToMerge[i]) 1861 ? nameofRegionsToMerge[i] 1862 : Bytes.toBytes(RegionInfo.encodeRegionName(nameofRegionsToMerge[i])); 1863 } 1864 1865 TableName tableName = null; 1866 Pair<RegionInfo, ServerName> pair; 1867 1868 for (int i = 0; i < nameofRegionsToMerge.length; i++) { 1869 pair = getRegion(nameofRegionsToMerge[i]); 1870 1871 if (pair != null) { 1872 if (pair.getFirst().getReplicaId() != HRegionInfo.DEFAULT_REPLICA_ID) { 1873 throw new IllegalArgumentException("Can't invoke merge on non-default regions directly"); 1874 } 1875 if (tableName == null) { 1876 tableName = pair.getFirst().getTable(); 1877 } else if (!tableName.equals(pair.getFirst().getTable())) { 1878 throw new IllegalArgumentException("Cannot merge regions from two different tables " 1879 + tableName + " and " + pair.getFirst().getTable()); 1880 } 1881 } else { 1882 throw new UnknownRegionException("Can't invoke merge on unknown region " 1883 + Bytes.toStringBinary(encodedNameofRegionsToMerge[i])); 1884 } 1885 } 1886 1887 MergeTableRegionsResponse response = executeCallable( 1888 new MasterCallable<MergeTableRegionsResponse>(getConnection(), getRpcControllerFactory()) { 1889 Long nonceGroup = ng.getNonceGroup(); 1890 Long nonce = ng.newNonce(); 1891 1892 @Override 1893 protected MergeTableRegionsResponse rpcCall() throws Exception { 1894 MergeTableRegionsRequest request = RequestConverter.buildMergeTableRegionsRequest( 1895 encodedNameofRegionsToMerge, forcible, nonceGroup, nonce); 1896 return master.mergeTableRegions(getRpcController(), request); 1897 } 1898 }); 1899 return new MergeTableRegionsFuture(this, tableName, response); 1900 } 1901 1902 private static class MergeTableRegionsFuture extends TableFuture<Void> { 1903 public MergeTableRegionsFuture(final HBaseAdmin admin, final TableName tableName, 1904 final MergeTableRegionsResponse response) { 1905 super(admin, tableName, 1906 (response != null && response.hasProcId()) ? 
response.getProcId() : null); 1907 } 1908 1909 public MergeTableRegionsFuture(final HBaseAdmin admin, final TableName tableName, 1910 final Long procId) { 1911 super(admin, tableName, procId); 1912 } 1913 1914 @Override 1915 public String getOperationType() { 1916 return "MERGE_REGIONS"; 1917 } 1918 } 1919 1920 /** 1921 * Split one region. Synchronous operation. Note: It is not feasible to predict the length of 1922 * split. Therefore, this is for internal testing only. 1923 * @param regionName encoded or full name of region 1924 * @param splitPoint key where region splits 1925 * @throws IOException if a remote or network exception occurs 1926 */ 1927 public void splitRegionSync(byte[] regionName, byte[] splitPoint) throws IOException { 1928 splitRegionSync(regionName, splitPoint, syncWaitTimeout, TimeUnit.MILLISECONDS); 1929 } 1930 1931 /** 1932 * Split one region. Synchronous operation. 1933 * @param regionName region to be split 1934 * @param splitPoint split point 1935 * @param timeout how long to wait on split 1936 * @param units time units 1937 * @throws IOException if a remote or network exception occurs 1938 */ 1939 public void splitRegionSync(byte[] regionName, byte[] splitPoint, final long timeout, 1940 final TimeUnit units) throws IOException { 1941 get(splitRegionAsync(regionName, splitPoint), timeout, units); 1942 } 1943 1944 @Override 1945 public Future<Void> splitRegionAsync(byte[] regionName, byte[] splitPoint) throws IOException { 1946 byte[] encodedNameofRegionToSplit = HRegionInfo.isEncodedRegionName(regionName) 1947 ? regionName 1948 : Bytes.toBytes(HRegionInfo.encodeRegionName(regionName)); 1949 Pair<RegionInfo, ServerName> pair = getRegion(regionName); 1950 if (pair != null) { 1951 if ( 1952 pair.getFirst() != null && pair.getFirst().getReplicaId() != HRegionInfo.DEFAULT_REPLICA_ID 1953 ) { 1954 throw new IllegalArgumentException("Can't invoke split on non-default regions directly"); 1955 } 1956 } else { 1957 throw new UnknownRegionException( 1958 "Can't invoke split on unknown region " + Bytes.toStringBinary(encodedNameofRegionToSplit)); 1959 } 1960 1961 return splitRegionAsync(pair.getFirst(), splitPoint); 1962 } 1963 1964 Future<Void> splitRegionAsync(RegionInfo hri, byte[] splitPoint) throws IOException { 1965 TableName tableName = hri.getTable(); 1966 if ( 1967 hri.getStartKey() != null && splitPoint != null 1968 && Bytes.compareTo(hri.getStartKey(), splitPoint) == 0 1969 ) { 1970 throw new IOException("should not give a split key which equals the start key!"); 1971 } 1972 1973 SplitTableRegionResponse response = executeCallable( 1974 new MasterCallable<SplitTableRegionResponse>(getConnection(), getRpcControllerFactory()) { 1975 Long nonceGroup = ng.getNonceGroup(); 1976 Long nonce = ng.newNonce(); 1977 1978 @Override 1979 protected SplitTableRegionResponse rpcCall() throws Exception { 1980 setPriority(tableName); 1981 SplitTableRegionRequest request = 1982 RequestConverter.buildSplitTableRegionRequest(hri, splitPoint, nonceGroup, nonce); 1983 return master.splitRegion(getRpcController(), request); 1984 } 1985 }); 1986 return new SplitTableRegionFuture(this, tableName, response); 1987 } 1988 1989 private static class SplitTableRegionFuture extends TableFuture<Void> { 1990 public SplitTableRegionFuture(final HBaseAdmin admin, final TableName tableName, 1991 final SplitTableRegionResponse response) { 1992 super(admin, tableName, 1993 (response != null && response.hasProcId()) ?
response.getProcId() : null); 1994 } 1995 1996 public SplitTableRegionFuture(final HBaseAdmin admin, final TableName tableName, 1997 final Long procId) { 1998 super(admin, tableName, procId); 1999 } 2000 2001 @Override 2002 public String getOperationType() { 2003 return "SPLIT_REGION"; 2004 } 2005 } 2006 2007 @Override 2008 public void split(final TableName tableName) throws IOException { 2009 split(tableName, null); 2010 } 2011 2012 @Override 2013 public void splitRegion(final byte[] regionName) throws IOException { 2014 splitRegion(regionName, null); 2015 } 2016 2017 @Override 2018 public void split(final TableName tableName, final byte[] splitPoint) throws IOException { 2019 checkTableExists(tableName); 2020 for (HRegionLocation loc : connection.locateRegions(tableName, false, false)) { 2021 ServerName sn = loc.getServerName(); 2022 if (sn == null) { 2023 continue; 2024 } 2025 RegionInfo r = loc.getRegion(); 2026 // check for parents 2027 if (r.isSplitParent()) { 2028 continue; 2029 } 2030 // if a split point given, only split that particular region 2031 if ( 2032 r.getReplicaId() != RegionInfo.DEFAULT_REPLICA_ID 2033 || (splitPoint != null && !r.containsRow(splitPoint)) 2034 ) { 2035 continue; 2036 } 2037 // call out to master to do split now 2038 splitRegionAsync(r, splitPoint); 2039 } 2040 } 2041 2042 @Override 2043 public void splitRegion(final byte[] regionName, final byte[] splitPoint) throws IOException { 2044 Pair<RegionInfo, ServerName> regionServerPair = getRegion(regionName); 2045 if (regionServerPair == null) { 2046 throw new IllegalArgumentException("Invalid region: " + Bytes.toStringBinary(regionName)); 2047 } 2048 if ( 2049 regionServerPair.getFirst() != null 2050 && regionServerPair.getFirst().getReplicaId() != HRegionInfo.DEFAULT_REPLICA_ID 2051 ) { 2052 throw new IllegalArgumentException( 2053 "Can't split replicas directly. " + "Replicas are auto-split when their primary is split."); 2054 } 2055 if (regionServerPair.getSecond() == null) { 2056 throw new NoServerForRegionException(Bytes.toStringBinary(regionName)); 2057 } 2058 splitRegionAsync(regionServerPair.getFirst(), splitPoint); 2059 } 2060 2061 @Override 2062 public void truncateRegion(byte[] regionName) throws IOException { 2063 get(truncateRegionAsync(regionName), syncWaitTimeout, TimeUnit.MILLISECONDS); 2064 } 2065 2066 @Override 2067 public Future<Void> truncateRegionAsync(byte[] regionName) throws IOException { 2068 Pair<RegionInfo, ServerName> pair = getRegion(regionName); 2069 RegionInfo hri; 2070 2071 if (pair != null) { 2072 hri = pair.getFirst(); 2073 if (hri != null && hri.getReplicaId() != HRegionInfo.DEFAULT_REPLICA_ID) { 2074 throw new IllegalArgumentException( 2075 "Can't truncate replicas directly.Replicas are auto-truncated " 2076 + "when their primary is truncated."); 2077 } 2078 } else { 2079 throw new IllegalArgumentException("Invalid region: " + Bytes.toStringBinary(regionName)); 2080 } 2081 2082 TableName tableName = (hri != null) ? 
hri.getTable() : null; 2083 2084 MasterProtos.TruncateRegionResponse response = 2085 executeCallable(getTruncateRegionCallable(tableName, hri)); 2086 2087 return new TruncateRegionFuture(this, tableName, response); 2088 } 2089 2090 /** 2091 * Get the list of cached files 2092 */ 2093 @Override 2094 public List<String> getCachedFilesList(ServerName serverName) throws IOException { 2095 return ProtobufUtil.getCachedFilesList(rpcControllerFactory.newController(), 2096 this.connection.getAdmin(serverName)); 2097 } 2098 2099 private MasterCallable<MasterProtos.TruncateRegionResponse> 2100 getTruncateRegionCallable(TableName tableName, RegionInfo hri) { 2101 return new MasterCallable<MasterProtos.TruncateRegionResponse>(getConnection(), 2102 getRpcControllerFactory()) { 2103 Long nonceGroup = ng.getNonceGroup(); 2104 Long nonce = ng.newNonce(); 2105 2106 @Override 2107 protected MasterProtos.TruncateRegionResponse rpcCall() throws Exception { 2108 setPriority(tableName); 2109 MasterProtos.TruncateRegionRequest request = 2110 RequestConverter.buildTruncateRegionRequest(hri, nonceGroup, nonce); 2111 return master.truncateRegion(getRpcController(), request); 2112 } 2113 }; 2114 } 2115 2116 private static class TruncateRegionFuture extends TableFuture<Void> { 2117 public TruncateRegionFuture(final HBaseAdmin admin, final TableName tableName, 2118 final MasterProtos.TruncateRegionResponse response) { 2119 super(admin, tableName, 2120 (response != null && response.hasProcId()) ? response.getProcId() : null); 2121 } 2122 2123 @Override 2124 public String getOperationType() { 2125 return "TRUNCATE_REGION"; 2126 } 2127 } 2128 2129 private static class ModifyTableFuture extends TableFuture<Void> { 2130 public ModifyTableFuture(final HBaseAdmin admin, final TableName tableName, 2131 final ModifyTableResponse response) { 2132 super(admin, tableName, 2133 (response != null && response.hasProcId()) ? response.getProcId() : null); 2134 } 2135 2136 public ModifyTableFuture(final HBaseAdmin admin, final TableName tableName, final Long procId) { 2137 super(admin, tableName, procId); 2138 } 2139 2140 @Override 2141 public String getOperationType() { 2142 return "MODIFY"; 2143 } 2144 2145 @Override 2146 protected Void postOperationResult(final Void result, final long deadlineTs) 2147 throws IOException, TimeoutException { 2148 // The modify operation on the table is asynchronous on the server side irrespective 2149 // of whether Procedure V2 is supported or not. So, we wait in the client till 2150 // all regions get updated. 2151 waitForSchemaUpdate(deadlineTs); 2152 return result; 2153 } 2154 } 2155 2156 /** 2157 * @param regionName Name of a region. 2158 * @return a pair of HRegionInfo and ServerName if <code>regionName</code> is a verified region 2159 * name (we call {@link MetaTableAccessor#getRegionLocation(Connection, byte[])} else 2160 * null. Throw IllegalArgumentException if <code>regionName</code> is null. 2161 * @throws IOException if a remote or network exception occurs 2162 */ 2163 Pair<RegionInfo, ServerName> getRegion(final byte[] regionName) throws IOException { 2164 if (regionName == null) { 2165 throw new IllegalArgumentException("Pass a table name or region name"); 2166 } 2167 Pair<RegionInfo, ServerName> pair = MetaTableAccessor.getRegion(connection, regionName); 2168 if (pair == null) { 2169 final String encodedName = Bytes.toString(regionName); 2170 // When it is not a valid regionName, it is possible that it could be an encoded regionName. 
2171 // To match an encoded regionName, we have to scan the meta table and compare entry by entry. 2172 // Since that scan is expensive, we first require the name to be a valid MD5 hash, which 2173 // filters out most invalid inputs. 2174 if (!RegionInfo.isMD5Hash(encodedName)) { 2175 return null; 2176 } 2177 final AtomicReference<Pair<RegionInfo, ServerName>> result = new AtomicReference<>(null); 2178 MetaTableAccessor.Visitor visitor = new MetaTableAccessor.Visitor() { 2179 @Override 2180 public boolean visit(Result data) throws IOException { 2181 RegionInfo info = MetaTableAccessor.getRegionInfo(data); 2182 if (info == null) { 2183 LOG.warn("No serialized HRegionInfo in " + data); 2184 return true; 2185 } 2186 RegionLocations rl = MetaTableAccessor.getRegionLocations(data); 2187 boolean matched = false; 2188 ServerName sn = null; 2189 if (rl != null) { 2190 for (HRegionLocation h : rl.getRegionLocations()) { 2191 if (h != null && encodedName.equals(h.getRegionInfo().getEncodedName())) { 2192 sn = h.getServerName(); 2193 info = h.getRegionInfo(); 2194 matched = true; 2195 } 2196 } 2197 } 2198 if (!matched) return true; 2199 result.set(new Pair<>(info, sn)); 2200 return false; // found the region, stop 2201 } 2202 }; 2203 2204 MetaTableAccessor.fullScanRegions(connection, visitor); 2205 pair = result.get(); 2206 } 2207 return pair; 2208 } 2209 2210 /** 2211 * If the input is a region name, it is returned as is. If it's an encoded region name, the 2212 * corresponding region is found from meta and its region name is returned. If we can't find any 2213 * region in meta matching the input as either region name or encoded region name, the input is 2214 * returned as is. We don't throw an unknown region exception. 2215 */ 2216 private byte[] getRegionName(final byte[] regionNameOrEncodedRegionName) throws IOException { 2217 if ( 2218 Bytes.equals(regionNameOrEncodedRegionName, HRegionInfo.FIRST_META_REGIONINFO.getRegionName()) 2219 || Bytes.equals(regionNameOrEncodedRegionName, 2220 HRegionInfo.FIRST_META_REGIONINFO.getEncodedNameAsBytes()) 2221 ) { 2222 return HRegionInfo.FIRST_META_REGIONINFO.getRegionName(); 2223 } 2224 byte[] tmp = regionNameOrEncodedRegionName; 2225 Pair<RegionInfo, ServerName> regionServerPair = getRegion(regionNameOrEncodedRegionName); 2226 if (regionServerPair != null && regionServerPair.getFirst() != null) { 2227 tmp = regionServerPair.getFirst().getRegionName(); 2228 } 2229 return tmp; 2230 } 2231 2232 /** 2233 * Check whether the table exists. 2234 * @param tableName Name of a table. 2235 * @return tableName instance 2236 * @throws IOException if a remote or network exception occurs. 2237 * @throws TableNotFoundException if table does not exist.
2238 */ 2239 private TableName checkTableExists(final TableName tableName) throws IOException { 2240 return executeCallable(new RpcRetryingCallable<TableName>() { 2241 @Override 2242 protected TableName rpcCall(int callTimeout) throws Exception { 2243 if (MetaTableAccessor.getTableState(getConnection(), tableName) == null) { 2244 throw new TableNotFoundException(tableName); 2245 } 2246 return tableName; 2247 } 2248 }); 2249 } 2250 2251 @Override 2252 public synchronized void shutdown() throws IOException { 2253 executeCallable(new MasterCallable<Void>(getConnection(), getRpcControllerFactory()) { 2254 @Override 2255 protected Void rpcCall() throws Exception { 2256 setPriority(HConstants.HIGH_QOS); 2257 master.shutdown(getRpcController(), ShutdownRequest.newBuilder().build()); 2258 return null; 2259 } 2260 }); 2261 } 2262 2263 @Override 2264 public synchronized void stopMaster() throws IOException { 2265 executeCallable(new MasterCallable<Void>(getConnection(), getRpcControllerFactory()) { 2266 @Override 2267 protected Void rpcCall() throws Exception { 2268 setPriority(HConstants.HIGH_QOS); 2269 master.stopMaster(getRpcController(), StopMasterRequest.newBuilder().build()); 2270 return null; 2271 } 2272 }); 2273 } 2274 2275 @Override 2276 public synchronized void stopRegionServer(final String hostnamePort) throws IOException { 2277 String hostname = Addressing.parseHostname(hostnamePort); 2278 int port = Addressing.parsePort(hostnamePort); 2279 final AdminService.BlockingInterface admin = 2280 this.connection.getAdmin(ServerName.valueOf(hostname, port, 0)); 2281 // TODO: There is no timeout on this controller. Set one! 2282 HBaseRpcController controller = rpcControllerFactory.newController(); 2283 controller.setPriority(HConstants.HIGH_QOS); 2284 StopServerRequest request = RequestConverter 2285 .buildStopServerRequest("Called by admin client " + this.connection.toString()); 2286 try { 2287 admin.stopServer(controller, request); 2288 } catch (Exception e) { 2289 throw ProtobufUtil.handleRemoteException(e); 2290 } 2291 } 2292 2293 @Override 2294 public boolean isMasterInMaintenanceMode() throws IOException { 2295 return executeCallable( 2296 new MasterCallable<IsInMaintenanceModeResponse>(getConnection(), this.rpcControllerFactory) { 2297 @Override 2298 protected IsInMaintenanceModeResponse rpcCall() throws Exception { 2299 return master.isMasterInMaintenanceMode(getRpcController(), 2300 IsInMaintenanceModeRequest.newBuilder().build()); 2301 } 2302 }).getInMaintenanceMode(); 2303 } 2304 2305 @Override 2306 public ClusterMetrics getClusterMetrics(EnumSet<Option> options) throws IOException { 2307 return executeCallable( 2308 new MasterCallable<ClusterMetrics>(getConnection(), this.rpcControllerFactory) { 2309 @Override 2310 protected ClusterMetrics rpcCall() throws Exception { 2311 GetClusterStatusRequest req = RequestConverter.buildGetClusterStatusRequest(options); 2312 return ClusterMetricsBuilder 2313 .toClusterMetrics(master.getClusterStatus(getRpcController(), req).getClusterStatus()); 2314 } 2315 }); 2316 } 2317 2318 @Override 2319 public List<RegionMetrics> getRegionMetrics(ServerName serverName, TableName tableName) 2320 throws IOException { 2321 AdminService.BlockingInterface admin = this.connection.getAdmin(serverName); 2322 HBaseRpcController controller = rpcControllerFactory.newController(); 2323 AdminProtos.GetRegionLoadRequest request = 2324 RequestConverter.buildGetRegionLoadRequest(tableName); 2325 try { 2326 return admin.getRegionLoad(controller, 
request).getRegionLoadsList().stream() 2327 .map(RegionMetricsBuilder::toRegionMetrics).collect(Collectors.toList()); 2328 } catch (ServiceException se) { 2329 throw ProtobufUtil.getRemoteException(se); 2330 } 2331 } 2332 2333 @Override 2334 public Configuration getConfiguration() { 2335 return this.conf; 2336 } 2337 2338 /** 2339 * Do a get with a timeout against the passed in <code>future</code>. 2340 */ 2341 private static <T> T get(final Future<T> future, final long timeout, final TimeUnit units) 2342 throws IOException { 2343 try { 2344 // TODO: how long should we wait? Spin forever? 2345 return future.get(timeout, units); 2346 } catch (InterruptedException e) { 2347 IOException ioe = new InterruptedIOException("Interrupt while waiting on " + future); 2348 ioe.initCause(e); 2349 throw ioe; 2350 } catch (TimeoutException e) { 2351 throw new TimeoutIOException(e); 2352 } catch (ExecutionException e) { 2353 if (e.getCause() instanceof IOException) { 2354 throw (IOException) e.getCause(); 2355 } else { 2356 throw new IOException(e.getCause()); 2357 } 2358 } 2359 } 2360 2361 @Override 2362 public Future<Void> createNamespaceAsync(final NamespaceDescriptor descriptor) 2363 throws IOException { 2364 CreateNamespaceResponse response = executeCallable( 2365 new MasterCallable<CreateNamespaceResponse>(getConnection(), getRpcControllerFactory()) { 2366 @Override 2367 protected CreateNamespaceResponse rpcCall() throws Exception { 2368 return master.createNamespace(getRpcController(), CreateNamespaceRequest.newBuilder() 2369 .setNamespaceDescriptor(ProtobufUtil.toProtoNamespaceDescriptor(descriptor)).build()); 2370 } 2371 }); 2372 return new NamespaceFuture(this, descriptor.getName(), response.getProcId()) { 2373 @Override 2374 public String getOperationType() { 2375 return "CREATE_NAMESPACE"; 2376 } 2377 }; 2378 } 2379 2380 @Override 2381 public Future<Void> modifyNamespaceAsync(final NamespaceDescriptor descriptor) 2382 throws IOException { 2383 ModifyNamespaceResponse response = executeCallable( 2384 new MasterCallable<ModifyNamespaceResponse>(getConnection(), getRpcControllerFactory()) { 2385 @Override 2386 protected ModifyNamespaceResponse rpcCall() throws Exception { 2387 // TODO: set priority based on NS? 2388 return master.modifyNamespace(getRpcController(), ModifyNamespaceRequest.newBuilder() 2389 .setNamespaceDescriptor(ProtobufUtil.toProtoNamespaceDescriptor(descriptor)).build()); 2390 } 2391 }); 2392 return new NamespaceFuture(this, descriptor.getName(), response.getProcId()) { 2393 @Override 2394 public String getOperationType() { 2395 return "MODIFY_NAMESPACE"; 2396 } 2397 }; 2398 } 2399 2400 @Override 2401 public Future<Void> deleteNamespaceAsync(final String name) throws IOException { 2402 DeleteNamespaceResponse response = executeCallable( 2403 new MasterCallable<DeleteNamespaceResponse>(getConnection(), getRpcControllerFactory()) { 2404 @Override 2405 protected DeleteNamespaceResponse rpcCall() throws Exception { 2406 // TODO: set priority based on NS? 
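// One way the TODO above could be addressed (illustrative assumption, not the original
// implementation): give operations on the system namespace a higher QOS, e.g.
//   if (NamespaceDescriptor.SYSTEM_NAMESPACE_NAME_STR.equals(name)) {
//     setPriority(HConstants.HIGH_QOS);
//   }
// setPriority(int) and HConstants.HIGH_QOS are used elsewhere in this class; the
// SYSTEM_NAMESPACE_NAME_STR check is only a sketch of the idea.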
2407 return master.deleteNamespace(getRpcController(), 2408 DeleteNamespaceRequest.newBuilder().setNamespaceName(name).build()); 2409 } 2410 }); 2411 return new NamespaceFuture(this, name, response.getProcId()) { 2412 @Override 2413 public String getOperationType() { 2414 return "DELETE_NAMESPACE"; 2415 } 2416 }; 2417 } 2418 2419 @Override 2420 public NamespaceDescriptor getNamespaceDescriptor(final String name) 2421 throws NamespaceNotFoundException, IOException { 2422 return executeCallable( 2423 new MasterCallable<NamespaceDescriptor>(getConnection(), getRpcControllerFactory()) { 2424 @Override 2425 protected NamespaceDescriptor rpcCall() throws Exception { 2426 return ProtobufUtil.toNamespaceDescriptor(master 2427 .getNamespaceDescriptor(getRpcController(), 2428 GetNamespaceDescriptorRequest.newBuilder().setNamespaceName(name).build()) 2429 .getNamespaceDescriptor()); 2430 } 2431 }); 2432 } 2433 2434 /** 2435 * List available namespaces 2436 * @return List of namespace names 2437 * @throws IOException if a remote or network exception occurs 2438 */ 2439 @Override 2440 public String[] listNamespaces() throws IOException { 2441 return executeCallable( 2442 new MasterCallable<String[]>(getConnection(), getRpcControllerFactory()) { 2443 @Override 2444 protected String[] rpcCall() throws Exception { 2445 List<String> list = 2446 master.listNamespaces(getRpcController(), ListNamespacesRequest.newBuilder().build()) 2447 .getNamespaceNameList(); 2448 return list.toArray(new String[list.size()]); 2449 } 2450 }); 2451 } 2452 2453 /** 2454 * List available namespace descriptors 2455 * @return List of descriptors 2456 * @throws IOException if a remote or network exception occurs 2457 */ 2458 @Override 2459 public NamespaceDescriptor[] listNamespaceDescriptors() throws IOException { 2460 return executeCallable( 2461 new MasterCallable<NamespaceDescriptor[]>(getConnection(), getRpcControllerFactory()) { 2462 @Override 2463 protected NamespaceDescriptor[] rpcCall() throws Exception { 2464 List< 2465 HBaseProtos.NamespaceDescriptor> list = 2466 master 2467 .listNamespaceDescriptors(getRpcController(), 2468 ListNamespaceDescriptorsRequest.newBuilder().build()) 2469 .getNamespaceDescriptorList(); 2470 NamespaceDescriptor[] res = new NamespaceDescriptor[list.size()]; 2471 for (int i = 0; i < list.size(); i++) { 2472 res[i] = ProtobufUtil.toNamespaceDescriptor(list.get(i)); 2473 } 2474 return res; 2475 } 2476 }); 2477 } 2478 2479 @Override 2480 public String getProcedures() throws IOException { 2481 return executeCallable(new MasterCallable<String>(getConnection(), getRpcControllerFactory()) { 2482 @Override 2483 protected String rpcCall() throws Exception { 2484 GetProceduresRequest request = GetProceduresRequest.newBuilder().build(); 2485 GetProceduresResponse response = master.getProcedures(getRpcController(), request); 2486 return ProtobufUtil.toProcedureJson(response.getProcedureList()); 2487 } 2488 }); 2489 } 2490 2491 @Override 2492 public String getLocks() throws IOException { 2493 return executeCallable(new MasterCallable<String>(getConnection(), getRpcControllerFactory()) { 2494 @Override 2495 protected String rpcCall() throws Exception { 2496 GetLocksRequest request = GetLocksRequest.newBuilder().build(); 2497 GetLocksResponse response = master.getLocks(getRpcController(), request); 2498 return ProtobufUtil.toLockJson(response.getLockList()); 2499 } 2500 }); 2501 } 2502 2503 @Override 2504 public HTableDescriptor[] listTableDescriptorsByNamespace(final String name) throws IOException { 2505 
return executeCallable( 2506 new MasterCallable<HTableDescriptor[]>(getConnection(), getRpcControllerFactory()) { 2507 @Override 2508 protected HTableDescriptor[] rpcCall() throws Exception { 2509 List<TableSchema> list = master 2510 .listTableDescriptorsByNamespace(getRpcController(), 2511 ListTableDescriptorsByNamespaceRequest.newBuilder().setNamespaceName(name).build()) 2512 .getTableSchemaList(); 2513 HTableDescriptor[] res = new HTableDescriptor[list.size()]; 2514 for (int i = 0; i < list.size(); i++) { 2515 res[i] = new ImmutableHTableDescriptor(ProtobufUtil.toTableDescriptor(list.get(i))); 2516 } 2517 return res; 2518 } 2519 }); 2520 } 2521 2522 @Override 2523 public TableName[] listTableNamesByNamespace(final String name) throws IOException { 2524 return executeCallable( 2525 new MasterCallable<TableName[]>(getConnection(), getRpcControllerFactory()) { 2526 @Override 2527 protected TableName[] rpcCall() throws Exception { 2528 List<HBaseProtos.TableName> tableNames = master 2529 .listTableNamesByNamespace(getRpcController(), 2530 ListTableNamesByNamespaceRequest.newBuilder().setNamespaceName(name).build()) 2531 .getTableNameList(); 2532 TableName[] result = new TableName[tableNames.size()]; 2533 for (int i = 0; i < tableNames.size(); i++) { 2534 result[i] = ProtobufUtil.toTableName(tableNames.get(i)); 2535 } 2536 return result; 2537 } 2538 }); 2539 } 2540 2541 /** 2542 * Is HBase available? Throw an exception if not. 2543 * @param conf system configuration 2544 * @throws MasterNotRunningException if the master is not running. 2545 * @throws ZooKeeperConnectionException if unable to connect to zookeeper. // TODO do not expose 2546 * ZKConnectionException. 2547 */ 2548 public static void available(final Configuration conf) 2549 throws MasterNotRunningException, ZooKeeperConnectionException, IOException { 2550 Configuration copyOfConf = HBaseConfiguration.create(conf); 2551 // We set it to make it fail as soon as possible if HBase is not available 2552 copyOfConf.setInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER, 1); 2553 copyOfConf.setInt("zookeeper.recovery.retry", 0); 2554 2555 // Check ZK first. 2556 // If the connection exists, we may have a connection to ZK that does not work anymore 2557 try (ClusterConnection connection = 2558 (ClusterConnection) ConnectionFactory.createConnection(copyOfConf)) { 2559 // can throw MasterNotRunningException 2560 connection.isMasterRunning(); 2561 } 2562 } 2563 2564 /** 2565 * @return List of {@link HRegionInfo}. 2566 * @throws IOException if a remote or network exception occurs 2567 * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 Use 2568 * {@link #getRegions(TableName)}. 
2569 */ 2570 @Deprecated 2571 @Override 2572 public List<HRegionInfo> getTableRegions(final TableName tableName) throws IOException { 2573 return getRegions(tableName).stream().map(ImmutableHRegionInfo::new) 2574 .collect(Collectors.toList()); 2575 } 2576 2577 @Override 2578 public synchronized void close() throws IOException { 2579 } 2580 2581 @Override 2582 public HTableDescriptor[] getTableDescriptorsByTableName(final List<TableName> tableNames) 2583 throws IOException { 2584 return executeCallable( 2585 new MasterCallable<HTableDescriptor[]>(getConnection(), getRpcControllerFactory()) { 2586 @Override 2587 protected HTableDescriptor[] rpcCall() throws Exception { 2588 GetTableDescriptorsRequest req = 2589 RequestConverter.buildGetTableDescriptorsRequest(tableNames); 2590 return ProtobufUtil 2591 .toTableDescriptorList(master.getTableDescriptors(getRpcController(), req)).stream() 2592 .map(ImmutableHTableDescriptor::new).toArray(HTableDescriptor[]::new); 2593 } 2594 }); 2595 } 2596 2597 @Override 2598 public HTableDescriptor[] getTableDescriptors(List<String> names) throws IOException { 2599 List<TableName> tableNames = new ArrayList<>(names.size()); 2600 for (String name : names) { 2601 tableNames.add(TableName.valueOf(name)); 2602 } 2603 return getTableDescriptorsByTableName(tableNames); 2604 } 2605 2606 private RollWALWriterResponse rollWALWriterImpl(final ServerName sn) 2607 throws IOException, FailedLogCloseException { 2608 final AdminService.BlockingInterface admin = this.connection.getAdmin(sn); 2609 RollWALWriterRequest request = RequestConverter.buildRollWALWriterRequest(); 2610 // TODO: There is no timeout on this controller. Set one! 2611 HBaseRpcController controller = rpcControllerFactory.newController(); 2612 try { 2613 return admin.rollWALWriter(controller, request); 2614 } catch (ServiceException e) { 2615 throw ProtobufUtil.handleRemoteException(e); 2616 } 2617 } 2618 2619 @Override 2620 public synchronized void rollWALWriter(ServerName serverName) 2621 throws IOException, FailedLogCloseException { 2622 rollWALWriterImpl(serverName); 2623 } 2624 2625 @Override 2626 public CompactionState getCompactionState(final TableName tableName) throws IOException { 2627 return getCompactionState(tableName, CompactType.NORMAL); 2628 } 2629 2630 @Override 2631 public CompactionState getCompactionStateForRegion(final byte[] regionName) throws IOException { 2632 final Pair<RegionInfo, ServerName> regionServerPair = getRegion(regionName); 2633 if (regionServerPair == null) { 2634 throw new IllegalArgumentException("Invalid region: " + Bytes.toStringBinary(regionName)); 2635 } 2636 if (regionServerPair.getSecond() == null) { 2637 throw new NoServerForRegionException(Bytes.toStringBinary(regionName)); 2638 } 2639 ServerName sn = regionServerPair.getSecond(); 2640 final AdminService.BlockingInterface admin = this.connection.getAdmin(sn); 2641 // TODO: There is no timeout on this controller. Set one! 
2642 HBaseRpcController controller = rpcControllerFactory.newController(); 2643 GetRegionInfoRequest request = 2644 RequestConverter.buildGetRegionInfoRequest(regionServerPair.getFirst().getRegionName(), true); 2645 GetRegionInfoResponse response; 2646 try { 2647 response = admin.getRegionInfo(controller, request); 2648 } catch (ServiceException e) { 2649 throw ProtobufUtil.handleRemoteException(e); 2650 } 2651 if (response.getCompactionState() != null) { 2652 return ProtobufUtil.createCompactionState(response.getCompactionState()); 2653 } 2654 return null; 2655 } 2656 2657 @Override 2658 public Future<Void> snapshotAsync(SnapshotDescription snapshotDesc) 2659 throws IOException, SnapshotCreationException { 2660 SnapshotResponse resp = asyncSnapshot(ProtobufUtil.createHBaseProtosSnapshotDesc(snapshotDesc)); 2661 // This is for keeping compatibility with old implementation. 2662 // If there is a procId field in the response, then the snapshot will be operated with a 2663 // SnapshotProcedure, otherwise the snapshot will be coordinated by zk. 2664 if (resp.hasProcId()) { 2665 return new SnapshotFuture(this, snapshotDesc, resp.getProcId()); 2666 } else { 2667 return new SnapshotFuture(this, snapshotDesc, null); 2668 } 2669 } 2670 2671 private static final class SnapshotFuture extends TableFuture<Void> { 2672 private final SnapshotDescription snapshotDesc; 2673 2674 public SnapshotFuture(HBaseAdmin admin, SnapshotDescription snapshotDesc, Long procId) { 2675 super(admin, snapshotDesc.getTableName(), procId); 2676 this.snapshotDesc = snapshotDesc; 2677 } 2678 2679 @Override 2680 public String getOperationType() { 2681 return "SNAPSHOT"; 2682 } 2683 2684 @Override 2685 protected Void waitOperationResult(long deadlineTs) throws IOException, TimeoutException { 2686 waitForState(deadlineTs, new TableWaitForStateCallable() { 2687 @Override 2688 public boolean checkState(int tries) throws IOException { 2689 return getAdmin().isSnapshotFinished(snapshotDesc); 2690 } 2691 }); 2692 return null; 2693 } 2694 } 2695 2696 private SnapshotResponse asyncSnapshot(SnapshotProtos.SnapshotDescription snapshot) 2697 throws IOException { 2698 ClientSnapshotDescriptionUtils.assertSnapshotRequestIsValid(snapshot); 2699 final SnapshotRequest request = SnapshotRequest.newBuilder().setSnapshot(snapshot) 2700 .setNonceGroup(ng.newNonce()).setNonce(ng.newNonce()).build(); 2701 // run the snapshot on the master 2702 return executeCallable( 2703 new MasterCallable<SnapshotResponse>(getConnection(), getRpcControllerFactory()) { 2704 @Override 2705 protected SnapshotResponse rpcCall() throws Exception { 2706 return master.snapshot(getRpcController(), request); 2707 } 2708 }); 2709 } 2710 2711 @Override 2712 public boolean isSnapshotFinished(final SnapshotDescription snapshotDesc) 2713 throws IOException, HBaseSnapshotException, UnknownSnapshotException { 2714 final SnapshotProtos.SnapshotDescription snapshot = 2715 ProtobufUtil.createHBaseProtosSnapshotDesc(snapshotDesc); 2716 return executeCallable( 2717 new MasterCallable<IsSnapshotDoneResponse>(getConnection(), getRpcControllerFactory()) { 2718 @Override 2719 protected IsSnapshotDoneResponse rpcCall() throws Exception { 2720 return master.isSnapshotDone(getRpcController(), 2721 IsSnapshotDoneRequest.newBuilder().setSnapshot(snapshot).build()); 2722 } 2723 }).getDone(); 2724 } 2725 2726 @Override 2727 public void restoreSnapshot(final byte[] snapshotName) 2728 throws IOException, RestoreSnapshotException { 2729 restoreSnapshot(Bytes.toString(snapshotName)); 2730 } 2731 2732 
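// Illustrative usage sketch (not part of the original class): how a client would typically
// take a snapshot and later restore it through the Admin API. The table and snapshot names
// below ("myTable", "mySnapshot") are example values only.
//
//   try (Connection conn = ConnectionFactory.createConnection(conf);
//        Admin admin = conn.getAdmin()) {
//     TableName table = TableName.valueOf("myTable");
//     admin.snapshot("mySnapshot", table);   // snapshot while the table is online
//     admin.disableTable(table);             // restore requires the table to be disabled
//     admin.restoreSnapshot("mySnapshot");   // may take a failsafe snapshot first; see below
//     admin.enableTable(table);
//   }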
@Override 2733 public void restoreSnapshot(final String snapshotName) 2734 throws IOException, RestoreSnapshotException { 2735 boolean takeFailSafeSnapshot = 2736 conf.getBoolean(HConstants.SNAPSHOT_RESTORE_TAKE_FAILSAFE_SNAPSHOT, 2737 HConstants.DEFAULT_SNAPSHOT_RESTORE_TAKE_FAILSAFE_SNAPSHOT); 2738 restoreSnapshot(snapshotName, takeFailSafeSnapshot); 2739 } 2740 2741 @Override 2742 public void restoreSnapshot(final byte[] snapshotName, final boolean takeFailSafeSnapshot) 2743 throws IOException, RestoreSnapshotException { 2744 restoreSnapshot(Bytes.toString(snapshotName), takeFailSafeSnapshot); 2745 } 2746 2747 /** 2748 * Check whether the snapshot exists and return the name of the table it was taken from 2749 * @param snapshotName name of the snapshot to restore 2750 * @throws IOException if a remote or network exception occurs 2751 * @throws RestoreSnapshotException if no valid snapshot is found 2752 */ 2753 private TableName getTableNameBeforeRestoreSnapshot(final String snapshotName) 2754 throws IOException, RestoreSnapshotException { 2755 TableName tableName = null; 2756 for (SnapshotDescription snapshotInfo : listSnapshots()) { 2757 if (snapshotInfo.getName().equals(snapshotName)) { 2758 tableName = snapshotInfo.getTableName(); 2759 break; 2760 } 2761 } 2762 2763 if (tableName == null) { 2764 throw new RestoreSnapshotException( 2765 "Unable to find the table name for snapshot=" + snapshotName); 2766 } 2767 return tableName; 2768 } 2769 2770 @Override 2771 public void restoreSnapshot(String snapshotName, boolean takeFailSafeSnapshot) 2772 throws IOException, RestoreSnapshotException { 2773 restoreSnapshot(snapshotName, takeFailSafeSnapshot, false); 2774 } 2775 2776 @Override 2777 public void restoreSnapshot(final String snapshotName, final boolean takeFailSafeSnapshot, 2778 final boolean restoreAcl) throws IOException, RestoreSnapshotException { 2779 TableName tableName = getTableNameBeforeRestoreSnapshot(snapshotName); 2780 2781 // The table does not exist, switch to clone. 2782 if (!tableExists(tableName)) { 2783 cloneSnapshot(snapshotName, tableName, restoreAcl); 2784 return; 2785 } 2786 2787 // Check if the table is disabled 2788 if (!isTableDisabled(tableName)) { 2789 throw new TableNotDisabledException(tableName); 2790 } 2791 2792 // Take a snapshot of the current state 2793 String failSafeSnapshotSnapshotName = null; 2794 if (takeFailSafeSnapshot) { 2795 failSafeSnapshotSnapshotName = conf.get("hbase.snapshot.restore.failsafe.name", 2796 "hbase-failsafe-{snapshot.name}-{restore.timestamp}"); 2797 failSafeSnapshotSnapshotName = 2798 failSafeSnapshotSnapshotName.replace("{snapshot.name}", snapshotName) 2799 .replace("{table.name}", tableName.toString().replace(TableName.NAMESPACE_DELIM, '.')) 2800 .replace("{restore.timestamp}", String.valueOf(EnvironmentEdgeManager.currentTime())); 2801 LOG.info("Taking restore-failsafe snapshot: " + failSafeSnapshotSnapshotName); 2802 snapshot(failSafeSnapshotSnapshotName, tableName); 2803 } 2804 2805 try { 2806 // Restore snapshot 2807 get(internalRestoreSnapshotAsync(snapshotName, tableName, restoreAcl, null), syncWaitTimeout, 2808 TimeUnit.MILLISECONDS); 2809 } catch (IOException e) { 2810 // Something went wrong during the restore... 2811 // If the pre-restore snapshot is available, try to roll back. 2812 if (takeFailSafeSnapshot) { 2813 try { 2814 get( 2815 internalRestoreSnapshotAsync(failSafeSnapshotSnapshotName, tableName, restoreAcl, null), 2816 syncWaitTimeout, TimeUnit.MILLISECONDS); 2817 String msg = "Restore snapshot=" + snapshotName + " failed.
Rollback to snapshot=" 2818 + failSafeSnapshotSnapshotName + " succeeded."; 2819 LOG.error(msg, e); 2820 throw new RestoreSnapshotException(msg, e); 2821 } catch (IOException ex) { 2822 String msg = "Failed to restore and rollback to snapshot=" + failSafeSnapshotSnapshotName; 2823 LOG.error(msg, ex); 2824 throw new RestoreSnapshotException(msg, e); 2825 } 2826 } else { 2827 throw new RestoreSnapshotException("Failed to restore snapshot=" + snapshotName, e); 2828 } 2829 } 2830 2831 // If the restore is succeeded, delete the pre-restore snapshot 2832 if (takeFailSafeSnapshot) { 2833 try { 2834 LOG.info("Deleting restore-failsafe snapshot: " + failSafeSnapshotSnapshotName); 2835 deleteSnapshot(failSafeSnapshotSnapshotName); 2836 } catch (IOException e) { 2837 LOG.error("Unable to remove the failsafe snapshot: " + failSafeSnapshotSnapshotName, e); 2838 } 2839 } 2840 } 2841 2842 @Override 2843 public Future<Void> restoreSnapshotAsync(final String snapshotName) 2844 throws IOException, RestoreSnapshotException { 2845 TableName tableName = getTableNameBeforeRestoreSnapshot(snapshotName); 2846 2847 // The table does not exists, switch to clone. 2848 if (!tableExists(tableName)) { 2849 return cloneSnapshotAsync(snapshotName, tableName); 2850 } 2851 2852 // Check if the table is disabled 2853 if (!isTableDisabled(tableName)) { 2854 throw new TableNotDisabledException(tableName); 2855 } 2856 2857 return internalRestoreSnapshotAsync(snapshotName, tableName, false, null); 2858 } 2859 2860 @Override 2861 public Future<Void> cloneSnapshotAsync(String snapshotName, TableName tableName, 2862 boolean restoreAcl, String customSFT) 2863 throws IOException, TableExistsException, RestoreSnapshotException { 2864 if (tableExists(tableName)) { 2865 throw new TableExistsException(tableName); 2866 } 2867 return internalRestoreSnapshotAsync(snapshotName, tableName, restoreAcl, customSFT); 2868 } 2869 2870 @Override 2871 public byte[] execProcedureWithReturn(String signature, String instance, 2872 Map<String, String> props) throws IOException { 2873 ProcedureDescription desc = ProtobufUtil.buildProcedureDescription(signature, instance, props); 2874 final ExecProcedureRequest request = 2875 ExecProcedureRequest.newBuilder().setProcedure(desc).build(); 2876 // run the procedure on the master 2877 ExecProcedureResponse response = executeCallable( 2878 new MasterCallable<ExecProcedureResponse>(getConnection(), getRpcControllerFactory()) { 2879 @Override 2880 protected ExecProcedureResponse rpcCall() throws Exception { 2881 return master.execProcedureWithRet(getRpcController(), request); 2882 } 2883 }); 2884 2885 return response.hasReturnData() ? 
response.getReturnData().toByteArray() : null; 2886 } 2887 2888 @Override 2889 public void execProcedure(String signature, String instance, Map<String, String> props) 2890 throws IOException { 2891 ProcedureDescription desc = ProtobufUtil.buildProcedureDescription(signature, instance, props); 2892 final ExecProcedureRequest request = 2893 ExecProcedureRequest.newBuilder().setProcedure(desc).build(); 2894 // run the procedure on the master 2895 ExecProcedureResponse response = executeCallable( 2896 new MasterCallable<ExecProcedureResponse>(getConnection(), getRpcControllerFactory()) { 2897 @Override 2898 protected ExecProcedureResponse rpcCall() throws Exception { 2899 return master.execProcedure(getRpcController(), request); 2900 } 2901 }); 2902 2903 long start = EnvironmentEdgeManager.currentTime(); 2904 long max = response.getExpectedTimeout(); 2905 long maxPauseTime = max / this.numRetries; 2906 int tries = 0; 2907 LOG.debug("Waiting a max of " + max + " ms for procedure '" + signature + " : " + instance 2908 + "'' to complete. (max " + maxPauseTime + " ms per retry)"); 2909 boolean done = false; 2910 while (tries == 0 || ((EnvironmentEdgeManager.currentTime() - start) < max && !done)) { 2911 try { 2912 // sleep a backoff <= pauseTime amount 2913 long sleep = getPauseTime(tries++); 2914 sleep = sleep > maxPauseTime ? maxPauseTime : sleep; 2915 LOG.debug( 2916 "(#" + tries + ") Sleeping: " + sleep + "ms while waiting for procedure completion."); 2917 Thread.sleep(sleep); 2918 } catch (InterruptedException e) { 2919 throw (InterruptedIOException) new InterruptedIOException("Interrupted").initCause(e); 2920 } 2921 LOG.debug("Getting current status of procedure from master..."); 2922 done = isProcedureFinished(signature, instance, props); 2923 } 2924 if (!done) { 2925 throw new IOException("Procedure '" + signature + " : " + instance 2926 + "' wasn't completed in expectedTime:" + max + " ms"); 2927 } 2928 } 2929 2930 @Override 2931 public boolean isProcedureFinished(String signature, String instance, Map<String, String> props) 2932 throws IOException { 2933 ProcedureDescription desc = ProtobufUtil.buildProcedureDescription(signature, instance, props); 2934 return executeCallable( 2935 new MasterCallable<IsProcedureDoneResponse>(getConnection(), getRpcControllerFactory()) { 2936 @Override 2937 protected IsProcedureDoneResponse rpcCall() throws Exception { 2938 return master.isProcedureDone(getRpcController(), 2939 IsProcedureDoneRequest.newBuilder().setProcedure(desc).build()); 2940 } 2941 }).getDone(); 2942 } 2943 2944 /** 2945 * Execute Restore/Clone snapshot and wait for the server to complete (blocking). To check if the 2946 * cloned table exists, use {@link #isTableAvailable} -- it is not safe to create an HTable 2947 * instance to this table before it is available. 
2948 * @param snapshotName snapshot to restore 2949 * @param tableName table name to restore the snapshot on 2950 * @throws IOException if a remote or network exception occurs 2951 * @throws RestoreSnapshotException if snapshot failed to be restored 2952 * @throws IllegalArgumentException if the restore request is formatted incorrectly 2953 */ 2954 private Future<Void> internalRestoreSnapshotAsync(final String snapshotName, 2955 final TableName tableName, final boolean restoreAcl, String customSFT) 2956 throws IOException, RestoreSnapshotException { 2957 final SnapshotProtos.SnapshotDescription snapshot = SnapshotProtos.SnapshotDescription 2958 .newBuilder().setName(snapshotName).setTable(tableName.getNameAsString()).build(); 2959 2960 // actually restore the snapshot 2961 ClientSnapshotDescriptionUtils.assertSnapshotRequestIsValid(snapshot); 2962 2963 RestoreSnapshotResponse response = executeCallable( 2964 new MasterCallable<RestoreSnapshotResponse>(getConnection(), getRpcControllerFactory()) { 2965 Long nonceGroup = ng.getNonceGroup(); 2966 Long nonce = ng.newNonce(); 2967 2968 @Override 2969 protected RestoreSnapshotResponse rpcCall() throws Exception { 2970 final RestoreSnapshotRequest.Builder builder = 2971 RestoreSnapshotRequest.newBuilder().setSnapshot(snapshot).setNonceGroup(nonceGroup) 2972 .setNonce(nonce).setRestoreACL(restoreAcl); 2973 if (customSFT != null) { 2974 builder.setCustomSFT(customSFT); 2975 } 2976 return master.restoreSnapshot(getRpcController(), builder.build()); 2977 } 2978 }); 2979 2980 return new RestoreSnapshotFuture(this, snapshot, tableName, response); 2981 } 2982 2983 private static class RestoreSnapshotFuture extends TableFuture<Void> { 2984 public RestoreSnapshotFuture(final HBaseAdmin admin, 2985 final SnapshotProtos.SnapshotDescription snapshot, final TableName tableName, 2986 final RestoreSnapshotResponse response) { 2987 super(admin, tableName, 2988 (response != null && response.hasProcId()) ? 
response.getProcId() : null); 2989 2990 if (response != null && !response.hasProcId()) { 2991 throw new UnsupportedOperationException("Client could not call old version of Server"); 2992 } 2993 } 2994 2995 public RestoreSnapshotFuture(final HBaseAdmin admin, final TableName tableName, 2996 final Long procId) { 2997 super(admin, tableName, procId); 2998 } 2999 3000 @Override 3001 public String getOperationType() { 3002 return "MODIFY"; 3003 } 3004 } 3005 3006 @Override 3007 public List<SnapshotDescription> listSnapshots() throws IOException { 3008 return executeCallable( 3009 new MasterCallable<List<SnapshotDescription>>(getConnection(), getRpcControllerFactory()) { 3010 @Override 3011 protected List<SnapshotDescription> rpcCall() throws Exception { 3012 List<SnapshotProtos.SnapshotDescription> snapshotsList = 3013 master.getCompletedSnapshots(getRpcController(), 3014 GetCompletedSnapshotsRequest.newBuilder().build()).getSnapshotsList(); 3015 List<SnapshotDescription> result = new ArrayList<>(snapshotsList.size()); 3016 for (SnapshotProtos.SnapshotDescription snapshot : snapshotsList) { 3017 result.add(ProtobufUtil.createSnapshotDesc(snapshot)); 3018 } 3019 return result; 3020 } 3021 }); 3022 } 3023 3024 @Override 3025 public List<SnapshotDescription> listSnapshots(String regex) throws IOException { 3026 return listSnapshots(Pattern.compile(regex)); 3027 } 3028 3029 @Override 3030 public List<SnapshotDescription> listSnapshots(Pattern pattern) throws IOException { 3031 List<SnapshotDescription> matched = new ArrayList<>(); 3032 List<SnapshotDescription> snapshots = listSnapshots(); 3033 for (SnapshotDescription snapshot : snapshots) { 3034 if (pattern.matcher(snapshot.getName()).matches()) { 3035 matched.add(snapshot); 3036 } 3037 } 3038 return matched; 3039 } 3040 3041 @Override 3042 public List<SnapshotDescription> listTableSnapshots(String tableNameRegex, 3043 String snapshotNameRegex) throws IOException { 3044 return listTableSnapshots(Pattern.compile(tableNameRegex), Pattern.compile(snapshotNameRegex)); 3045 } 3046 3047 @Override 3048 public List<SnapshotDescription> listTableSnapshots(Pattern tableNamePattern, 3049 Pattern snapshotNamePattern) throws IOException { 3050 TableName[] tableNames = listTableNames(tableNamePattern); 3051 3052 List<SnapshotDescription> tableSnapshots = new ArrayList<>(); 3053 List<SnapshotDescription> snapshots = listSnapshots(snapshotNamePattern); 3054 3055 List<TableName> listOfTableNames = Arrays.asList(tableNames); 3056 for (SnapshotDescription snapshot : snapshots) { 3057 if (listOfTableNames.contains(snapshot.getTableName())) { 3058 tableSnapshots.add(snapshot); 3059 } 3060 } 3061 return tableSnapshots; 3062 } 3063 3064 @Override 3065 public void deleteSnapshot(final byte[] snapshotName) throws IOException { 3066 deleteSnapshot(Bytes.toString(snapshotName)); 3067 } 3068 3069 @Override 3070 public void deleteSnapshot(final String snapshotName) throws IOException { 3071 // make sure the snapshot is possibly valid 3072 TableName.isLegalFullyQualifiedTableName(Bytes.toBytes(snapshotName)); 3073 // do the delete 3074 executeCallable(new MasterCallable<Void>(getConnection(), getRpcControllerFactory()) { 3075 @Override 3076 protected Void rpcCall() throws Exception { 3077 master.deleteSnapshot(getRpcController(), 3078 DeleteSnapshotRequest.newBuilder() 3079 .setSnapshot( 3080 SnapshotProtos.SnapshotDescription.newBuilder().setName(snapshotName).build()) 3081 .build()); 3082 return null; 3083 } 3084 }); 3085 } 3086 3087 @Override 3088 public void 
deleteSnapshots(final String regex) throws IOException { 3089 deleteSnapshots(Pattern.compile(regex)); 3090 } 3091 3092 @Override 3093 public void deleteSnapshots(final Pattern pattern) throws IOException { 3094 List<SnapshotDescription> snapshots = listSnapshots(pattern); 3095 for (final SnapshotDescription snapshot : snapshots) { 3096 try { 3097 internalDeleteSnapshot(snapshot); 3098 } catch (IOException ex) { 3099 LOG.info("Failed to delete snapshot " + snapshot.getName() + " for table " 3100 + snapshot.getTableNameAsString(), ex); 3101 } 3102 } 3103 } 3104 3105 private void internalDeleteSnapshot(final SnapshotDescription snapshot) throws IOException { 3106 executeCallable(new MasterCallable<Void>(getConnection(), getRpcControllerFactory()) { 3107 @Override 3108 protected Void rpcCall() throws Exception { 3109 this.master.deleteSnapshot(getRpcController(), DeleteSnapshotRequest.newBuilder() 3110 .setSnapshot(ProtobufUtil.createHBaseProtosSnapshotDesc(snapshot)).build()); 3111 return null; 3112 } 3113 }); 3114 } 3115 3116 @Override 3117 public void deleteTableSnapshots(String tableNameRegex, String snapshotNameRegex) 3118 throws IOException { 3119 deleteTableSnapshots(Pattern.compile(tableNameRegex), Pattern.compile(snapshotNameRegex)); 3120 } 3121 3122 @Override 3123 public void deleteTableSnapshots(Pattern tableNamePattern, Pattern snapshotNamePattern) 3124 throws IOException { 3125 List<SnapshotDescription> snapshots = listTableSnapshots(tableNamePattern, snapshotNamePattern); 3126 for (SnapshotDescription snapshot : snapshots) { 3127 try { 3128 internalDeleteSnapshot(snapshot); 3129 LOG.debug("Successfully deleted snapshot: " + snapshot.getName()); 3130 } catch (IOException e) { 3131 LOG.error("Failed to delete snapshot: " + snapshot.getName(), e); 3132 } 3133 } 3134 } 3135 3136 @Override 3137 public void setQuota(final QuotaSettings quota) throws IOException { 3138 executeCallable(new MasterCallable<Void>(getConnection(), getRpcControllerFactory()) { 3139 @Override 3140 protected Void rpcCall() throws Exception { 3141 this.master.setQuota(getRpcController(), QuotaSettings.buildSetQuotaRequestProto(quota)); 3142 return null; 3143 } 3144 }); 3145 } 3146 3147 @Override 3148 public QuotaRetriever getQuotaRetriever(final QuotaFilter filter) throws IOException { 3149 return QuotaRetriever.open(conf, filter); 3150 } 3151 3152 @Override 3153 public List<QuotaSettings> getQuota(QuotaFilter filter) throws IOException { 3154 List<QuotaSettings> quotas = new ArrayList<>(); 3155 try (QuotaRetriever retriever = QuotaRetriever.open(conf, filter)) { 3156 Iterator<QuotaSettings> iterator = retriever.iterator(); 3157 while (iterator.hasNext()) { 3158 quotas.add(iterator.next()); 3159 } 3160 } 3161 return quotas; 3162 } 3163 3164 private <C extends RetryingCallable<V> & Closeable, V> V executeCallable(C callable) 3165 throws IOException { 3166 return executeCallable(callable, rpcCallerFactory, operationTimeout, rpcTimeout); 3167 } 3168 3169 static private <C extends RetryingCallable<V> & Closeable, V> V executeCallable(C callable, 3170 RpcRetryingCallerFactory rpcCallerFactory, int operationTimeout, int rpcTimeout) 3171 throws IOException { 3172 RpcRetryingCaller<V> caller = rpcCallerFactory.newCaller(rpcTimeout); 3173 try { 3174 return caller.callWithRetries(callable, operationTimeout); 3175 } finally { 3176 callable.close(); 3177 } 3178 } 3179 3180 @Override 3181 // Coprocessor Endpoint against the Master. 
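  // A minimal, illustrative sketch of how a caller might use the channel returned below
  // ('MyMasterService' is a hypothetical protobuf-generated coprocessor service, not part of HBase):
  //
  //   CoprocessorRpcChannel channel = admin.coprocessorService();
  //   MyMasterService.BlockingInterface stub = MyMasterService.newBlockingStub(channel);
  //   MyResponse resp = stub.myCall(null, MyRequest.getDefaultInstance());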
3182 public CoprocessorRpcChannel coprocessorService() { 3183 return new SyncCoprocessorRpcChannel() { 3184 @Override 3185 protected Message callExecService(final RpcController controller, 3186 final Descriptors.MethodDescriptor method, final Message request, 3187 final Message responsePrototype) throws IOException { 3188 if (LOG.isTraceEnabled()) { 3189 LOG.trace("Call: " + method.getName() + ", " + request.toString()); 3190 } 3191 // Try-with-resources so close gets called when we are done. 3192 try (MasterCallable<CoprocessorServiceResponse> callable = 3193 new MasterCallable<CoprocessorServiceResponse>(connection, 3194 connection.getRpcControllerFactory()) { 3195 @Override 3196 protected CoprocessorServiceResponse rpcCall() throws Exception { 3197 CoprocessorServiceRequest csr = 3198 CoprocessorRpcUtils.getCoprocessorServiceRequest(method, request); 3199 return this.master.execMasterService(getRpcController(), csr); 3200 } 3201 }) { 3202 // TODO: Are we retrying here? Does not seem so. We should use RetryingRpcCaller 3203 callable.prepare(false); 3204 int operationTimeout = connection.getConnectionConfiguration().getOperationTimeout(); 3205 CoprocessorServiceResponse result = callable.call(operationTimeout); 3206 return CoprocessorRpcUtils.getResponse(result, responsePrototype); 3207 } 3208 } 3209 }; 3210 } 3211 3212 @Override 3213 public CoprocessorRpcChannel coprocessorService(final ServerName serverName) { 3214 return new SyncCoprocessorRpcChannel() { 3215 @Override 3216 protected Message callExecService(RpcController controller, 3217 Descriptors.MethodDescriptor method, Message request, Message responsePrototype) 3218 throws IOException { 3219 if (LOG.isTraceEnabled()) { 3220 LOG.trace("Call: " + method.getName() + ", " + request.toString()); 3221 } 3222 CoprocessorServiceRequest csr = 3223 CoprocessorRpcUtils.getCoprocessorServiceRequest(method, request); 3224 // TODO: Are we retrying here? Does not seem so. We should use RetryingRpcCaller 3225 // TODO: Make this same as RegionCoprocessorRpcChannel and MasterCoprocessorRpcChannel. They 3226 // are all different though should do same thing; e.g. RpcChannel setup. 
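        // The request built above is dispatched to the region server's generic
        // ClientService.execRegionServerService endpoint with a fresh controller per call;
        // no retrying caller is used here (see the TODO above).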
3227 ClientProtos.ClientService.BlockingInterface stub = connection.getClient(serverName); 3228 CoprocessorServiceResponse result; 3229 try { 3230 result = 3231 stub.execRegionServerService(connection.getRpcControllerFactory().newController(), csr); 3232 return CoprocessorRpcUtils.getResponse(result, responsePrototype); 3233 } catch (ServiceException e) { 3234 throw ProtobufUtil.handleRemoteException(e); 3235 } 3236 } 3237 }; 3238 } 3239 3240 @Override 3241 public void updateConfiguration(final ServerName server) throws IOException { 3242 final AdminService.BlockingInterface admin = this.connection.getAdmin(server); 3243 Callable<Void> callable = new Callable<Void>() { 3244 @Override 3245 public Void call() throws Exception { 3246 admin.updateConfiguration(null, UpdateConfigurationRequest.getDefaultInstance()); 3247 return null; 3248 } 3249 }; 3250 ProtobufUtil.call(callable); 3251 } 3252 3253 @Override 3254 public void updateConfiguration() throws IOException { 3255 ClusterMetrics status = 3256 getClusterMetrics(EnumSet.of(Option.LIVE_SERVERS, Option.MASTER, Option.BACKUP_MASTERS)); 3257 for (ServerName server : status.getLiveServerMetrics().keySet()) { 3258 updateConfiguration(server); 3259 } 3260 3261 updateConfiguration(status.getMasterName()); 3262 3263 for (ServerName server : status.getBackupMasterNames()) { 3264 updateConfiguration(server); 3265 } 3266 } 3267 3268 @Override 3269 public long getLastMajorCompactionTimestamp(final TableName tableName) throws IOException { 3270 return executeCallable(new MasterCallable<Long>(getConnection(), getRpcControllerFactory()) { 3271 @Override 3272 protected Long rpcCall() throws Exception { 3273 MajorCompactionTimestampRequest req = MajorCompactionTimestampRequest.newBuilder() 3274 .setTableName(ProtobufUtil.toProtoTableName(tableName)).build(); 3275 return master.getLastMajorCompactionTimestamp(getRpcController(), req) 3276 .getCompactionTimestamp(); 3277 } 3278 }); 3279 } 3280 3281 @Override 3282 public long getLastMajorCompactionTimestampForRegion(final byte[] regionName) throws IOException { 3283 return executeCallable(new MasterCallable<Long>(getConnection(), getRpcControllerFactory()) { 3284 @Override 3285 protected Long rpcCall() throws Exception { 3286 MajorCompactionTimestampForRegionRequest req = 3287 MajorCompactionTimestampForRegionRequest.newBuilder() 3288 .setRegion( 3289 RequestConverter.buildRegionSpecifier(RegionSpecifierType.REGION_NAME, regionName)) 3290 .build(); 3291 return master.getLastMajorCompactionTimestampForRegion(getRpcController(), req) 3292 .getCompactionTimestamp(); 3293 } 3294 }); 3295 } 3296 3297 /** 3298 * {@inheritDoc} 3299 */ 3300 @Override 3301 public void compact(final TableName tableName, final byte[] columnFamily, CompactType compactType) 3302 throws IOException, InterruptedException { 3303 compact(tableName, columnFamily, false, compactType); 3304 } 3305 3306 /** 3307 * {@inheritDoc} 3308 */ 3309 @Override 3310 public void compact(final TableName tableName, CompactType compactType) 3311 throws IOException, InterruptedException { 3312 compact(tableName, null, false, compactType); 3313 } 3314 3315 /** 3316 * {@inheritDoc} 3317 */ 3318 @Override 3319 public void majorCompact(final TableName tableName, final byte[] columnFamily, 3320 CompactType compactType) throws IOException, InterruptedException { 3321 compact(tableName, columnFamily, true, compactType); 3322 } 3323 3324 /** 3325 * {@inheritDoc} 3326 */ 3327 @Override 3328 public void majorCompact(final TableName tableName, CompactType compactType) 3329 
throws IOException, InterruptedException { 3330 compact(tableName, null, true, compactType); 3331 } 3332 3333 /** 3334 * {@inheritDoc} 3335 */ 3336 @Override 3337 public CompactionState getCompactionState(final TableName tableName, CompactType compactType) 3338 throws IOException { 3339 checkTableExists(tableName); 3340 if (!isTableEnabled(tableName)) { 3341 // If the table is disabled, the compaction state of the table should always be NONE 3342 return ProtobufUtil 3343 .createCompactionState(AdminProtos.GetRegionInfoResponse.CompactionState.NONE); 3344 } 3345 3346 AdminProtos.GetRegionInfoResponse.CompactionState state = 3347 AdminProtos.GetRegionInfoResponse.CompactionState.NONE; 3348 3349 // TODO: There is no timeout on this controller. Set one! 3350 HBaseRpcController rpcController = rpcControllerFactory.newController(); 3351 switch (compactType) { 3352 case MOB: 3353 final AdminProtos.AdminService.BlockingInterface masterAdmin = 3354 this.connection.getAdminForMaster(); 3355 Callable<AdminProtos.GetRegionInfoResponse.CompactionState> callable = 3356 new Callable<AdminProtos.GetRegionInfoResponse.CompactionState>() { 3357 @Override 3358 public AdminProtos.GetRegionInfoResponse.CompactionState call() throws Exception { 3359 RegionInfo info = RegionInfo.createMobRegionInfo(tableName); 3360 GetRegionInfoRequest request = 3361 RequestConverter.buildGetRegionInfoRequest(info.getRegionName(), true); 3362 GetRegionInfoResponse response = masterAdmin.getRegionInfo(rpcController, request); 3363 return response.getCompactionState(); 3364 } 3365 }; 3366 state = ProtobufUtil.call(callable); 3367 break; 3368 case NORMAL: 3369 for (HRegionLocation loc : connection.locateRegions(tableName, false, false)) { 3370 ServerName sn = loc.getServerName(); 3371 if (sn == null) { 3372 continue; 3373 } 3374 byte[] regionName = loc.getRegion().getRegionName(); 3375 AdminService.BlockingInterface snAdmin = this.connection.getAdmin(sn); 3376 try { 3377 Callable<GetRegionInfoResponse> regionInfoCallable = 3378 new Callable<GetRegionInfoResponse>() { 3379 @Override 3380 public GetRegionInfoResponse call() throws Exception { 3381 GetRegionInfoRequest request = 3382 RequestConverter.buildGetRegionInfoRequest(regionName, true); 3383 return snAdmin.getRegionInfo(rpcController, request); 3384 } 3385 }; 3386 GetRegionInfoResponse response = ProtobufUtil.call(regionInfoCallable); 3387 switch (response.getCompactionState()) { 3388 case MAJOR_AND_MINOR: 3389 return CompactionState.MAJOR_AND_MINOR; 3390 case MAJOR: 3391 if (state == AdminProtos.GetRegionInfoResponse.CompactionState.MINOR) { 3392 return CompactionState.MAJOR_AND_MINOR; 3393 } 3394 state = AdminProtos.GetRegionInfoResponse.CompactionState.MAJOR; 3395 break; 3396 case MINOR: 3397 if (state == AdminProtos.GetRegionInfoResponse.CompactionState.MAJOR) { 3398 return CompactionState.MAJOR_AND_MINOR; 3399 } 3400 state = AdminProtos.GetRegionInfoResponse.CompactionState.MINOR; 3401 break; 3402 case NONE: 3403 default: // nothing, continue 3404 } 3405 } catch (NotServingRegionException e) { 3406 if (LOG.isDebugEnabled()) { 3407 LOG.debug("Trying to get compaction state of " + loc.getRegion() + ": " 3408 + StringUtils.stringifyException(e)); 3409 } 3410 } catch (RemoteException e) { 3411 if (e.getMessage().indexOf(NotServingRegionException.class.getName()) >= 0) { 3412 if (LOG.isDebugEnabled()) { 3413 LOG.debug("Trying to get compaction state of " + loc.getRegion() + ": " 3414 + StringUtils.stringifyException(e)); 3415 } 3416 } else { 3417 throw e; 3418 } 3419 } 3420 } 
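        // At this point 'state' holds the compaction activity aggregated over all online regions of
        // the table (the MAJOR_AND_MINOR combinations returned early inside the loop above).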
3421 break; 3422 default: 3423 throw new IllegalArgumentException("Unknown compactType: " + compactType); 3424 } 3425 if (state != null) { 3426 return ProtobufUtil.createCompactionState(state); 3427 } 3428 return null; 3429 } 3430 3431 /** 3432 * Future that waits on a procedure result. Returned by the async version of the Admin calls, and 3433 * used internally by the sync calls to wait on the result of the procedure. 3434 */ 3435 @InterfaceAudience.Private 3436 @InterfaceStability.Evolving 3437 protected static class ProcedureFuture<V> implements Future<V> { 3438 private ExecutionException exception = null; 3439 private boolean procResultFound = false; 3440 private boolean done = false; 3441 private boolean cancelled = false; 3442 private V result = null; 3443 3444 private final HBaseAdmin admin; 3445 protected final Long procId; 3446 3447 public ProcedureFuture(final HBaseAdmin admin, final Long procId) { 3448 this.admin = admin; 3449 this.procId = procId; 3450 } 3451 3452 @Override 3453 public boolean cancel(boolean mayInterruptIfRunning) { 3454 AbortProcedureRequest abortProcRequest = AbortProcedureRequest.newBuilder().setProcId(procId) 3455 .setMayInterruptIfRunning(mayInterruptIfRunning).build(); 3456 try { 3457 cancelled = abortProcedureResult(abortProcRequest).getIsProcedureAborted(); 3458 if (cancelled) { 3459 done = true; 3460 } 3461 } catch (IOException e) { 3462 // The cancel call threw an exception. At this point we do not know whether the cancel 3463 // succeeded or failed; assume it failed, but log a warning so the failure can be 3464 // diagnosed. 3465 LOG.warn( 3466 "Cancelling the procedure with procId=" + procId + " throws exception " + e.getMessage(), 3467 e); 3468 cancelled = false; 3469 } 3470 return cancelled; 3471 } 3472 3473 @Override 3474 public boolean isCancelled() { 3475 return cancelled; 3476 } 3477 3478 protected AbortProcedureResponse abortProcedureResult(final AbortProcedureRequest request) 3479 throws IOException { 3480 return admin.executeCallable(new MasterCallable<AbortProcedureResponse>(admin.getConnection(), 3481 admin.getRpcControllerFactory()) { 3482 @Override 3483 protected AbortProcedureResponse rpcCall() throws Exception { 3484 return master.abortProcedure(getRpcController(), request); 3485 } 3486 }); 3487 } 3488 3489 @Override 3490 public V get() throws InterruptedException, ExecutionException { 3491 // TODO: should we ever spin forever? 3492 // Fix for HBASE-21715. TODO: if calling get() without a timeout limit is not allowed, 3493 // is it possible to compose a Future instead of inheriting from it for this class?
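      // Note on the behaviour below: the untimed get() is bounded by the admin's configured
      // procedure timeout, and a timeout is surfaced as a warning plus a null result rather
      // than an exception.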
3494 try { 3495 return get(admin.getProcedureTimeout, TimeUnit.MILLISECONDS); 3496 } catch (TimeoutException e) { 3497 LOG.warn("Failed to get the procedure with procId=" + procId + " throws exception " 3498 + e.getMessage(), e); 3499 return null; 3500 } 3501 } 3502 3503 @Override 3504 public V get(long timeout, TimeUnit unit) 3505 throws InterruptedException, ExecutionException, TimeoutException { 3506 if (!done) { 3507 long deadlineTs = EnvironmentEdgeManager.currentTime() + unit.toMillis(timeout); 3508 try { 3509 try { 3510 // if the master support procedures, try to wait the result 3511 if (procId != null) { 3512 result = waitProcedureResult(procId, deadlineTs); 3513 } 3514 // if we don't have a proc result, try the compatibility wait 3515 if (!procResultFound) { 3516 result = waitOperationResult(deadlineTs); 3517 } 3518 result = postOperationResult(result, deadlineTs); 3519 done = true; 3520 } catch (IOException e) { 3521 result = postOperationFailure(e, deadlineTs); 3522 done = true; 3523 } 3524 } catch (IOException e) { 3525 exception = new ExecutionException(e); 3526 done = true; 3527 } 3528 } 3529 if (exception != null) { 3530 throw exception; 3531 } 3532 return result; 3533 } 3534 3535 @Override 3536 public boolean isDone() { 3537 return done; 3538 } 3539 3540 protected HBaseAdmin getAdmin() { 3541 return admin; 3542 } 3543 3544 private V waitProcedureResult(long procId, long deadlineTs) 3545 throws IOException, TimeoutException, InterruptedException { 3546 GetProcedureResultRequest request = 3547 GetProcedureResultRequest.newBuilder().setProcId(procId).build(); 3548 3549 int tries = 0; 3550 IOException serviceEx = null; 3551 while (EnvironmentEdgeManager.currentTime() < deadlineTs) { 3552 GetProcedureResultResponse response = null; 3553 try { 3554 // Try to fetch the result 3555 response = getProcedureResult(request); 3556 } catch (IOException e) { 3557 serviceEx = unwrapException(e); 3558 3559 // the master may be down 3560 LOG.warn("failed to get the procedure result procId=" + procId, serviceEx); 3561 3562 // Not much to do, if we have a DoNotRetryIOException 3563 if (serviceEx instanceof DoNotRetryIOException) { 3564 // TODO: looks like there is no way to unwrap this exception and get the proper 3565 // UnsupportedOperationException aside from looking at the message. 3566 // anyway, if we fail here we just failover to the compatibility side 3567 // and that is always a valid solution. 
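          // Clearing procResultFound below makes the timed get() fall back to waitOperationResult(),
          // i.e. the pre-procedure-v2 compatibility path.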
3568 LOG.warn("Proc-v2 is unsupported on this master: " + serviceEx.getMessage(), serviceEx); 3569 procResultFound = false; 3570 return null; 3571 } 3572 } 3573 3574 // If the procedure is no longer running, we should have a result 3575 if (response != null && response.getState() != GetProcedureResultResponse.State.RUNNING) { 3576 procResultFound = response.getState() != GetProcedureResultResponse.State.NOT_FOUND; 3577 return convertResult(response); 3578 } 3579 3580 try { 3581 Thread.sleep(getAdmin().getPauseTime(tries++)); 3582 } catch (InterruptedException e) { 3583 throw new InterruptedException( 3584 "Interrupted while waiting for the result of proc " + procId); 3585 } 3586 } 3587 if (serviceEx != null) { 3588 throw serviceEx; 3589 } else { 3590 throw new TimeoutException("The procedure " + procId + " is still running"); 3591 } 3592 } 3593 3594 private static IOException unwrapException(IOException e) { 3595 if (e instanceof RemoteException) { 3596 return ((RemoteException) e).unwrapRemoteException(); 3597 } 3598 return e; 3599 } 3600 3601 protected GetProcedureResultResponse getProcedureResult(final GetProcedureResultRequest request) 3602 throws IOException { 3603 return admin.executeCallable(new MasterCallable<GetProcedureResultResponse>( 3604 admin.getConnection(), admin.getRpcControllerFactory()) { 3605 @Override 3606 protected GetProcedureResultResponse rpcCall() throws Exception { 3607 return master.getProcedureResult(getRpcController(), request); 3608 } 3609 }); 3610 } 3611 3612 /** 3613 * Convert the procedure result response to a specified type. 3614 * @param response the procedure result object to parse 3615 * @return the result data of the procedure. 3616 */ 3617 protected V convertResult(final GetProcedureResultResponse response) throws IOException { 3618 if (response.hasException()) { 3619 throw ForeignExceptionUtil.toIOException(response.getException()); 3620 } 3621 return null; 3622 } 3623 3624 /** 3625 * Fallback implementation in case the procedure is not supported by the server. It should try 3626 * to wait until the operation is completed. 3627 * @param deadlineTs the timestamp after which this method should throw a TimeoutException 3628 * @return the result data of the operation 3629 */ 3630 protected V waitOperationResult(final long deadlineTs) throws IOException, TimeoutException { 3631 return null; 3632 } 3633 3634 /** 3635 * Called after the operation is completed and the result fetched. this allows to perform extra 3636 * steps after the procedure is completed. it allows to apply transformations to the result that 3637 * will be returned by get(). 3638 * @param result the result of the procedure 3639 * @param deadlineTs the timestamp after which this method should throw a TimeoutException 3640 * @return the result of the procedure, which may be the same as the passed one 3641 */ 3642 protected V postOperationResult(final V result, final long deadlineTs) 3643 throws IOException, TimeoutException { 3644 return result; 3645 } 3646 3647 /** 3648 * Called after the operation is terminated with a failure. this allows to perform extra steps 3649 * after the procedure is terminated. it allows to apply transformations to the result that will 3650 * be returned by get(). 
The default implementation will rethrow the exception 3651 * @param exception the exception got from fetching the result 3652 * @param deadlineTs the timestamp after which this method should throw a TimeoutException 3653 * @return the result of the procedure, which may be the same as the passed one 3654 */ 3655 protected V postOperationFailure(final IOException exception, final long deadlineTs) 3656 throws IOException, TimeoutException { 3657 throw exception; 3658 } 3659 3660 protected interface WaitForStateCallable { 3661 boolean checkState(int tries) throws IOException; 3662 3663 void throwInterruptedException() throws InterruptedIOException; 3664 3665 void throwTimeoutException(long elapsed) throws TimeoutException; 3666 } 3667 3668 protected void waitForState(final long deadlineTs, final WaitForStateCallable callable) 3669 throws IOException, TimeoutException { 3670 int tries = 0; 3671 IOException serverEx = null; 3672 long startTime = EnvironmentEdgeManager.currentTime(); 3673 while (EnvironmentEdgeManager.currentTime() < deadlineTs) { 3674 serverEx = null; 3675 try { 3676 if (callable.checkState(tries)) { 3677 return; 3678 } 3679 } catch (IOException e) { 3680 serverEx = e; 3681 } 3682 try { 3683 Thread.sleep(getAdmin().getPauseTime(tries++)); 3684 } catch (InterruptedException e) { 3685 callable.throwInterruptedException(); 3686 } 3687 } 3688 if (serverEx != null) { 3689 throw unwrapException(serverEx); 3690 } else { 3691 callable.throwTimeoutException(EnvironmentEdgeManager.currentTime() - startTime); 3692 } 3693 } 3694 } 3695 3696 @InterfaceAudience.Private 3697 @InterfaceStability.Evolving 3698 protected static abstract class TableFuture<V> extends ProcedureFuture<V> { 3699 private final TableName tableName; 3700 3701 public TableFuture(final HBaseAdmin admin, final TableName tableName, final Long procId) { 3702 super(admin, procId); 3703 this.tableName = tableName; 3704 } 3705 3706 @Override 3707 public String toString() { 3708 return getDescription(); 3709 } 3710 3711 /** Returns the table name */ 3712 protected TableName getTableName() { 3713 return tableName; 3714 } 3715 3716 /** Returns the table descriptor */ 3717 protected TableDescriptor getTableDescriptor() throws IOException { 3718 return getAdmin().getDescriptor(getTableName()); 3719 } 3720 3721 /** Returns the operation type like CREATE, DELETE, DISABLE etc. 
*/ 3722 public abstract String getOperationType(); 3723 3724 /** Returns a description of the operation */ 3725 protected String getDescription() { 3726 return "Operation: " + getOperationType() + ", " + "Table Name: " 3727 + tableName.getNameWithNamespaceInclAsString() + ", procId: " + procId; 3728 } 3729 3730 protected abstract class TableWaitForStateCallable implements WaitForStateCallable { 3731 @Override 3732 public void throwInterruptedException() throws InterruptedIOException { 3733 throw new InterruptedIOException("Interrupted while waiting for " + getDescription()); 3734 } 3735 3736 @Override 3737 public void throwTimeoutException(long elapsedTime) throws TimeoutException { 3738 throw new TimeoutException( 3739 getDescription() + " has not completed after " + elapsedTime + "ms"); 3740 } 3741 } 3742 3743 @Override 3744 protected V postOperationResult(final V result, final long deadlineTs) 3745 throws IOException, TimeoutException { 3746 LOG.info(getDescription() + " completed"); 3747 return super.postOperationResult(result, deadlineTs); 3748 } 3749 3750 @Override 3751 protected V postOperationFailure(final IOException exception, final long deadlineTs) 3752 throws IOException, TimeoutException { 3753 LOG.info(getDescription() + " failed with " + exception.getMessage()); 3754 return super.postOperationFailure(exception, deadlineTs); 3755 } 3756 3757 protected void waitForTableEnabled(final long deadlineTs) throws IOException, TimeoutException { 3758 waitForState(deadlineTs, new TableWaitForStateCallable() { 3759 @Override 3760 public boolean checkState(int tries) throws IOException { 3761 try { 3762 if (getAdmin().isTableAvailable(tableName)) { 3763 return true; 3764 } 3765 } catch (TableNotFoundException tnfe) { 3766 LOG.debug("Table " + tableName.getNameWithNamespaceInclAsString() 3767 + " was not enabled, sleeping. 
tries=" + tries); 3768 } 3769 return false; 3770 } 3771 }); 3772 } 3773 3774 protected void waitForTableDisabled(final long deadlineTs) 3775 throws IOException, TimeoutException { 3776 waitForState(deadlineTs, new TableWaitForStateCallable() { 3777 @Override 3778 public boolean checkState(int tries) throws IOException { 3779 return getAdmin().isTableDisabled(tableName); 3780 } 3781 }); 3782 } 3783 3784 protected void waitTableNotFound(final long deadlineTs) throws IOException, TimeoutException { 3785 waitForState(deadlineTs, new TableWaitForStateCallable() { 3786 @Override 3787 public boolean checkState(int tries) throws IOException { 3788 return !getAdmin().tableExists(tableName); 3789 } 3790 }); 3791 } 3792 3793 protected void waitForSchemaUpdate(final long deadlineTs) throws IOException, TimeoutException { 3794 waitForState(deadlineTs, new TableWaitForStateCallable() { 3795 @Override 3796 public boolean checkState(int tries) throws IOException { 3797 return getAdmin().getAlterStatus(tableName).getFirst() == 0; 3798 } 3799 }); 3800 } 3801 3802 protected void waitForAllRegionsOnline(final long deadlineTs, final byte[][] splitKeys) 3803 throws IOException, TimeoutException { 3804 final TableDescriptor desc = getTableDescriptor(); 3805 final AtomicInteger actualRegCount = new AtomicInteger(0); 3806 final MetaTableAccessor.Visitor visitor = new MetaTableAccessor.Visitor() { 3807 @Override 3808 public boolean visit(Result rowResult) throws IOException { 3809 RegionLocations list = MetaTableAccessor.getRegionLocations(rowResult); 3810 if (list == null) { 3811 LOG.warn("No serialized HRegionInfo in " + rowResult); 3812 return true; 3813 } 3814 HRegionLocation l = list.getRegionLocation(); 3815 if (l == null) { 3816 return true; 3817 } 3818 if (!l.getRegionInfo().getTable().equals(desc.getTableName())) { 3819 return false; 3820 } 3821 if (l.getRegionInfo().isOffline() || l.getRegionInfo().isSplit()) return true; 3822 HRegionLocation[] locations = list.getRegionLocations(); 3823 for (HRegionLocation location : locations) { 3824 if (location == null) continue; 3825 ServerName serverName = location.getServerName(); 3826 // Make sure that regions are assigned to server 3827 if (serverName != null && serverName.getAddress() != null) { 3828 actualRegCount.incrementAndGet(); 3829 } 3830 } 3831 return true; 3832 } 3833 }; 3834 3835 int tries = 0; 3836 int numRegs = (splitKeys == null ? 
1 : splitKeys.length + 1) * desc.getRegionReplication(); 3837 while (EnvironmentEdgeManager.currentTime() < deadlineTs) { 3838 actualRegCount.set(0); 3839 MetaTableAccessor.scanMetaForTableRegions(getAdmin().getConnection(), visitor, 3840 desc.getTableName()); 3841 if (actualRegCount.get() == numRegs) { 3842 // all the regions are online 3843 return; 3844 } 3845 3846 try { 3847 Thread.sleep(getAdmin().getPauseTime(tries++)); 3848 } catch (InterruptedException e) { 3849 throw new InterruptedIOException("Interrupted when opening" + " regions; " 3850 + actualRegCount.get() + " of " + numRegs + " regions processed so far"); 3851 } 3852 } 3853 throw new TimeoutException("Only " + actualRegCount.get() + " of " + numRegs 3854 + " regions are online; retries exhausted."); 3855 } 3856 } 3857 3858 @InterfaceAudience.Private 3859 @InterfaceStability.Evolving 3860 protected static abstract class NamespaceFuture extends ProcedureFuture<Void> { 3861 private final String namespaceName; 3862 3863 public NamespaceFuture(final HBaseAdmin admin, final String namespaceName, final Long procId) { 3864 super(admin, procId); 3865 this.namespaceName = namespaceName; 3866 } 3867 3868 /** Returns the namespace name */ 3869 protected String getNamespaceName() { 3870 return namespaceName; 3871 } 3872 3873 /** Returns the operation type like CREATE_NAMESPACE, DELETE_NAMESPACE, etc. */ 3874 public abstract String getOperationType(); 3875 3876 @Override 3877 public String toString() { 3878 return "Operation: " + getOperationType() + ", Namespace: " + getNamespaceName(); 3879 } 3880 } 3881 3882 @InterfaceAudience.Private 3883 @InterfaceStability.Evolving 3884 private static class ReplicationFuture extends ProcedureFuture<Void> { 3885 private final String peerId; 3886 private final Supplier<String> getOperation; 3887 3888 public ReplicationFuture(HBaseAdmin admin, String peerId, Long procId, 3889 Supplier<String> getOperation) { 3890 super(admin, procId); 3891 this.peerId = peerId; 3892 this.getOperation = getOperation; 3893 } 3894 3895 @Override 3896 public String toString() { 3897 return "Operation: " + getOperation.get() + ", peerId: " + peerId; 3898 } 3899 } 3900 3901 @Override 3902 public List<SecurityCapability> getSecurityCapabilities() throws IOException { 3903 try { 3904 return executeCallable( 3905 new MasterCallable<List<SecurityCapability>>(getConnection(), getRpcControllerFactory()) { 3906 @Override 3907 protected List<SecurityCapability> rpcCall() throws Exception { 3908 SecurityCapabilitiesRequest req = SecurityCapabilitiesRequest.newBuilder().build(); 3909 return ProtobufUtil.toSecurityCapabilityList( 3910 master.getSecurityCapabilities(getRpcController(), req).getCapabilitiesList()); 3911 } 3912 }); 3913 } catch (IOException e) { 3914 if (e instanceof RemoteException) { 3915 e = ((RemoteException) e).unwrapRemoteException(); 3916 } 3917 throw e; 3918 } 3919 } 3920 3921 @Override 3922 public boolean splitSwitch(boolean enabled, boolean synchronous) throws IOException { 3923 return splitOrMergeSwitch(enabled, synchronous, MasterSwitchType.SPLIT); 3924 } 3925 3926 @Override 3927 public boolean mergeSwitch(boolean enabled, boolean synchronous) throws IOException { 3928 return splitOrMergeSwitch(enabled, synchronous, MasterSwitchType.MERGE); 3929 } 3930 3931 private boolean splitOrMergeSwitch(boolean enabled, boolean synchronous, 3932 MasterSwitchType switchType) throws IOException { 3933 return executeCallable(new MasterCallable<Boolean>(getConnection(), getRpcControllerFactory()) { 3934 @Override 3935 
protected Boolean rpcCall() throws Exception { 3936 MasterProtos.SetSplitOrMergeEnabledResponse response = 3937 master.setSplitOrMergeEnabled(getRpcController(), 3938 RequestConverter.buildSetSplitOrMergeEnabledRequest(enabled, synchronous, switchType)); 3939 return response.getPrevValueList().get(0); 3940 } 3941 }); 3942 } 3943 3944 @Override 3945 public boolean isSplitEnabled() throws IOException { 3946 return executeCallable(new MasterCallable<Boolean>(getConnection(), getRpcControllerFactory()) { 3947 @Override 3948 protected Boolean rpcCall() throws Exception { 3949 return master 3950 .isSplitOrMergeEnabled(getRpcController(), 3951 RequestConverter.buildIsSplitOrMergeEnabledRequest(MasterSwitchType.SPLIT)) 3952 .getEnabled(); 3953 } 3954 }); 3955 } 3956 3957 @Override 3958 public boolean isMergeEnabled() throws IOException { 3959 return executeCallable(new MasterCallable<Boolean>(getConnection(), getRpcControllerFactory()) { 3960 @Override 3961 protected Boolean rpcCall() throws Exception { 3962 return master 3963 .isSplitOrMergeEnabled(getRpcController(), 3964 RequestConverter.buildIsSplitOrMergeEnabledRequest(MasterSwitchType.MERGE)) 3965 .getEnabled(); 3966 } 3967 }); 3968 } 3969 3970 private RpcControllerFactory getRpcControllerFactory() { 3971 return this.rpcControllerFactory; 3972 } 3973 3974 @Override 3975 public Future<Void> addReplicationPeerAsync(String peerId, ReplicationPeerConfig peerConfig, 3976 boolean enabled) throws IOException { 3977 AddReplicationPeerResponse response = executeCallable( 3978 new MasterCallable<AddReplicationPeerResponse>(getConnection(), getRpcControllerFactory()) { 3979 @Override 3980 protected AddReplicationPeerResponse rpcCall() throws Exception { 3981 return master.addReplicationPeer(getRpcController(), 3982 RequestConverter.buildAddReplicationPeerRequest(peerId, peerConfig, enabled)); 3983 } 3984 }); 3985 return new ReplicationFuture(this, peerId, response.getProcId(), () -> "ADD_REPLICATION_PEER"); 3986 } 3987 3988 @Override 3989 public Future<Void> removeReplicationPeerAsync(String peerId) throws IOException { 3990 RemoveReplicationPeerResponse response = 3991 executeCallable(new MasterCallable<RemoveReplicationPeerResponse>(getConnection(), 3992 getRpcControllerFactory()) { 3993 @Override 3994 protected RemoveReplicationPeerResponse rpcCall() throws Exception { 3995 return master.removeReplicationPeer(getRpcController(), 3996 RequestConverter.buildRemoveReplicationPeerRequest(peerId)); 3997 } 3998 }); 3999 return new ReplicationFuture(this, peerId, response.getProcId(), 4000 () -> "REMOVE_REPLICATION_PEER"); 4001 } 4002 4003 @Override 4004 public Future<Void> enableReplicationPeerAsync(final String peerId) throws IOException { 4005 EnableReplicationPeerResponse response = 4006 executeCallable(new MasterCallable<EnableReplicationPeerResponse>(getConnection(), 4007 getRpcControllerFactory()) { 4008 @Override 4009 protected EnableReplicationPeerResponse rpcCall() throws Exception { 4010 return master.enableReplicationPeer(getRpcController(), 4011 RequestConverter.buildEnableReplicationPeerRequest(peerId)); 4012 } 4013 }); 4014 return new ReplicationFuture(this, peerId, response.getProcId(), 4015 () -> "ENABLE_REPLICATION_PEER"); 4016 } 4017 4018 @Override 4019 public Future<Void> disableReplicationPeerAsync(final String peerId) throws IOException { 4020 DisableReplicationPeerResponse response = 4021 executeCallable(new MasterCallable<DisableReplicationPeerResponse>(getConnection(), 4022 getRpcControllerFactory()) { 4023 @Override 4024 protected 
DisableReplicationPeerResponse rpcCall() throws Exception { 4025 return master.disableReplicationPeer(getRpcController(), 4026 RequestConverter.buildDisableReplicationPeerRequest(peerId)); 4027 } 4028 }); 4029 return new ReplicationFuture(this, peerId, response.getProcId(), 4030 () -> "DISABLE_REPLICATION_PEER"); 4031 } 4032 4033 @Override 4034 public ReplicationPeerConfig getReplicationPeerConfig(final String peerId) throws IOException { 4035 return executeCallable( 4036 new MasterCallable<ReplicationPeerConfig>(getConnection(), getRpcControllerFactory()) { 4037 @Override 4038 protected ReplicationPeerConfig rpcCall() throws Exception { 4039 GetReplicationPeerConfigResponse response = master.getReplicationPeerConfig( 4040 getRpcController(), RequestConverter.buildGetReplicationPeerConfigRequest(peerId)); 4041 return ReplicationPeerConfigUtil.convert(response.getPeerConfig()); 4042 } 4043 }); 4044 } 4045 4046 @Override 4047 public Future<Void> updateReplicationPeerConfigAsync(final String peerId, 4048 final ReplicationPeerConfig peerConfig) throws IOException { 4049 UpdateReplicationPeerConfigResponse response = 4050 executeCallable(new MasterCallable<UpdateReplicationPeerConfigResponse>(getConnection(), 4051 getRpcControllerFactory()) { 4052 @Override 4053 protected UpdateReplicationPeerConfigResponse rpcCall() throws Exception { 4054 return master.updateReplicationPeerConfig(getRpcController(), 4055 RequestConverter.buildUpdateReplicationPeerConfigRequest(peerId, peerConfig)); 4056 } 4057 }); 4058 return new ReplicationFuture(this, peerId, response.getProcId(), 4059 () -> "UPDATE_REPLICATION_PEER_CONFIG"); 4060 } 4061 4062 @Override 4063 public List<ReplicationPeerDescription> listReplicationPeers() throws IOException { 4064 return listReplicationPeers((Pattern) null); 4065 } 4066 4067 @Override 4068 public List<ReplicationPeerDescription> listReplicationPeers(Pattern pattern) throws IOException { 4069 return executeCallable(new MasterCallable<List<ReplicationPeerDescription>>(getConnection(), 4070 getRpcControllerFactory()) { 4071 @Override 4072 protected List<ReplicationPeerDescription> rpcCall() throws Exception { 4073 List<ReplicationProtos.ReplicationPeerDescription> peersList = 4074 master.listReplicationPeers(getRpcController(), 4075 RequestConverter.buildListReplicationPeersRequest(pattern)).getPeerDescList(); 4076 List<ReplicationPeerDescription> result = new ArrayList<>(peersList.size()); 4077 for (ReplicationProtos.ReplicationPeerDescription peer : peersList) { 4078 result.add(ReplicationPeerConfigUtil.toReplicationPeerDescription(peer)); 4079 } 4080 return result; 4081 } 4082 }); 4083 } 4084 4085 @Override 4086 public void decommissionRegionServers(List<ServerName> servers, boolean offload) 4087 throws IOException { 4088 executeCallable(new MasterCallable<Void>(getConnection(), getRpcControllerFactory()) { 4089 @Override 4090 public Void rpcCall() throws ServiceException { 4091 master.decommissionRegionServers(getRpcController(), 4092 RequestConverter.buildDecommissionRegionServersRequest(servers, offload)); 4093 return null; 4094 } 4095 }); 4096 } 4097 4098 @Override 4099 public List<ServerName> listDecommissionedRegionServers() throws IOException { 4100 return executeCallable( 4101 new MasterCallable<List<ServerName>>(getConnection(), getRpcControllerFactory()) { 4102 @Override 4103 public List<ServerName> rpcCall() throws ServiceException { 4104 ListDecommissionedRegionServersRequest req = 4105 ListDecommissionedRegionServersRequest.newBuilder().build(); 4106 
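          // Convert each protobuf HBaseProtos.ServerName in the master's response into the
          // client-side ServerName type before returning.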
List<ServerName> servers = new ArrayList<>(); 4107 for (HBaseProtos.ServerName server : master 4108 .listDecommissionedRegionServers(getRpcController(), req).getServerNameList()) { 4109 servers.add(ProtobufUtil.toServerName(server)); 4110 } 4111 return servers; 4112 } 4113 }); 4114 } 4115 4116 @Override 4117 public void recommissionRegionServer(ServerName server, List<byte[]> encodedRegionNames) 4118 throws IOException { 4119 executeCallable(new MasterCallable<Void>(getConnection(), getRpcControllerFactory()) { 4120 @Override 4121 public Void rpcCall() throws ServiceException { 4122 master.recommissionRegionServer(getRpcController(), 4123 RequestConverter.buildRecommissionRegionServerRequest(server, encodedRegionNames)); 4124 return null; 4125 } 4126 }); 4127 } 4128 4129 @Override 4130 public List<TableCFs> listReplicatedTableCFs() throws IOException { 4131 List<TableCFs> replicatedTableCFs = new ArrayList<>(); 4132 List<TableDescriptor> tables = listTableDescriptors(); 4133 tables.forEach(table -> { 4134 Map<String, Integer> cfs = new HashMap<>(); 4135 Stream.of(table.getColumnFamilies()) 4136 .filter(column -> column.getScope() != HConstants.REPLICATION_SCOPE_LOCAL) 4137 .forEach(column -> { 4138 cfs.put(column.getNameAsString(), column.getScope()); 4139 }); 4140 if (!cfs.isEmpty()) { 4141 replicatedTableCFs.add(new TableCFs(table.getTableName(), cfs)); 4142 } 4143 }); 4144 return replicatedTableCFs; 4145 } 4146 4147 @Override 4148 public void enableTableReplication(final TableName tableName) throws IOException { 4149 if (tableName == null) { 4150 throw new IllegalArgumentException("Table name cannot be null"); 4151 } 4152 if (!tableExists(tableName)) { 4153 throw new TableNotFoundException( 4154 "Table '" + tableName.getNameAsString() + "' does not exist."); 4155 } 4156 byte[][] splits = getTableSplits(tableName); 4157 checkAndSyncTableDescToPeers(tableName, splits); 4158 setTableRep(tableName, true); 4159 } 4160 4161 @Override 4162 public void disableTableReplication(final TableName tableName) throws IOException { 4163 if (tableName == null) { 4164 throw new IllegalArgumentException("Table name cannot be null"); 4165 } 4166 if (!tableExists(tableName)) { 4167 throw new TableNotFoundException( 4168 "Table '" + tableName.getNameAsString() + "' does not exist."); 4169 } 4170 setTableRep(tableName, false); 4171 } 4172 4173 @Override 4174 public boolean isReplicationPeerEnabled(String peerId) throws IOException { 4175 return executeCallable(new MasterCallable<Boolean>(getConnection(), getRpcControllerFactory()) { 4176 @Override 4177 protected Boolean rpcCall() throws Exception { 4178 GetReplicationPeerStateRequest.Builder request = 4179 GetReplicationPeerStateRequest.newBuilder(); 4180 request.setPeerId(peerId); 4181 GetReplicationPeerStateResponse response = 4182 master.isReplicationPeerEnabled(getRpcController(), request.build()); 4183 return response.getIsEnabled(); 4184 } 4185 }); 4186 } 4187 4188 /** 4189 * Connect to each replication peer and check the table descriptor on the peer cluster: 4190 * <ol> 4191 * <li>Create the same table on the peer if it does not exist.</li> 4192 * <li>Throw an exception if the table already has replication enabled on any of the column 4193 * families.</li> 4194 * <li>Throw an exception if the table exists on the peer cluster but the descriptors are not the same.</li> 4195 * </ol> 4196 * @param tableName name of the table to sync to the peer 4197 * @param splits table split keys 4198 * @throws IOException if a remote or network exception occurs 4199 */ 4200 private void checkAndSyncTableDescToPeers(final
TableName tableName, final byte[][] splits) 4201 throws IOException { 4202 List<ReplicationPeerDescription> peers = listReplicationPeers(); 4203 if (peers == null || peers.size() <= 0) { 4204 throw new IllegalArgumentException("Found no peer cluster for replication."); 4205 } 4206 4207 for (ReplicationPeerDescription peerDesc : peers) { 4208 if (peerDesc.getPeerConfig().needToReplicate(tableName)) { 4209 Configuration peerConf = 4210 ReplicationPeerConfigUtil.getPeerClusterConfiguration(this.conf, peerDesc); 4211 try (Connection conn = ConnectionFactory.createConnection(peerConf); 4212 Admin repHBaseAdmin = conn.getAdmin()) { 4213 TableDescriptor tableDesc = getDescriptor(tableName); 4214 TableDescriptor peerTableDesc = null; 4215 if (!repHBaseAdmin.tableExists(tableName)) { 4216 repHBaseAdmin.createTable(tableDesc, splits); 4217 } else { 4218 peerTableDesc = repHBaseAdmin.getDescriptor(tableName); 4219 if (peerTableDesc == null) { 4220 throw new IllegalArgumentException("Failed to get table descriptor for table " 4221 + tableName.getNameAsString() + " from peer cluster " + peerDesc.getPeerId()); 4222 } 4223 if ( 4224 TableDescriptor.COMPARATOR_IGNORE_REPLICATION.compare(peerTableDesc, tableDesc) != 0 4225 ) { 4226 throw new IllegalArgumentException("Table " + tableName.getNameAsString() 4227 + " exists in peer cluster " + peerDesc.getPeerId() 4228 + ", but the table descriptors are not same when compared with source cluster." 4229 + " Thus can not enable the table's replication switch."); 4230 } 4231 } 4232 } 4233 } 4234 } 4235 } 4236 4237 /** 4238 * Set the table's replication switch if the table's replication switch is already not set. 4239 * @param tableName name of the table 4240 * @param enableRep is replication switch enable or disable 4241 * @throws IOException if a remote or network exception occurs 4242 */ 4243 private void setTableRep(final TableName tableName, boolean enableRep) throws IOException { 4244 TableDescriptor tableDesc = getDescriptor(tableName); 4245 if (!tableDesc.matchReplicationScope(enableRep)) { 4246 int scope = 4247 enableRep ? HConstants.REPLICATION_SCOPE_GLOBAL : HConstants.REPLICATION_SCOPE_LOCAL; 4248 modifyTable(TableDescriptorBuilder.newBuilder(tableDesc).setReplicationScope(scope).build()); 4249 } 4250 } 4251 4252 @Override 4253 public void clearCompactionQueues(final ServerName sn, final Set<String> queues) 4254 throws IOException, InterruptedException { 4255 if (queues == null || queues.size() == 0) { 4256 throw new IllegalArgumentException("queues cannot be null or empty"); 4257 } 4258 final AdminService.BlockingInterface admin = this.connection.getAdmin(sn); 4259 Callable<Void> callable = new Callable<Void>() { 4260 @Override 4261 public Void call() throws Exception { 4262 // TODO: There is no timeout on this controller. Set one! 4263 HBaseRpcController controller = rpcControllerFactory.newController(); 4264 ClearCompactionQueuesRequest request = 4265 RequestConverter.buildClearCompactionQueuesRequest(queues); 4266 admin.clearCompactionQueues(controller, request); 4267 return null; 4268 } 4269 }; 4270 ProtobufUtil.call(callable); 4271 } 4272 4273 @Override 4274 public List<ServerName> clearDeadServers(List<ServerName> servers) throws IOException { 4275 return executeCallable( 4276 new MasterCallable<List<ServerName>>(getConnection(), getRpcControllerFactory()) { 4277 @Override 4278 protected List<ServerName> rpcCall() throws Exception { 4279 ClearDeadServersRequest req = RequestConverter 4280 .buildClearDeadServersRequest(servers == null ? 
Collections.EMPTY_LIST : servers); 4281 return ProtobufUtil 4282 .toServerNameList(master.clearDeadServers(getRpcController(), req).getServerNameList()); 4283 } 4284 }); 4285 } 4286 4287 @Override 4288 public void cloneTableSchema(final TableName tableName, final TableName newTableName, 4289 final boolean preserveSplits) throws IOException { 4290 checkTableExists(tableName); 4291 if (tableExists(newTableName)) { 4292 throw new TableExistsException(newTableName); 4293 } 4294 TableDescriptor htd = TableDescriptorBuilder.copy(newTableName, getTableDescriptor(tableName)); 4295 if (preserveSplits) { 4296 createTable(htd, getTableSplits(tableName)); 4297 } else { 4298 createTable(htd); 4299 } 4300 } 4301 4302 @Override 4303 public boolean switchRpcThrottle(final boolean enable) throws IOException { 4304 return executeCallable(new MasterCallable<Boolean>(getConnection(), getRpcControllerFactory()) { 4305 @Override 4306 protected Boolean rpcCall() throws Exception { 4307 return this.master.switchRpcThrottle(getRpcController(), 4308 MasterProtos.SwitchRpcThrottleRequest.newBuilder().setRpcThrottleEnabled(enable).build()) 4309 .getPreviousRpcThrottleEnabled(); 4310 } 4311 }); 4312 } 4313 4314 @Override 4315 public boolean isRpcThrottleEnabled() throws IOException { 4316 return executeCallable(new MasterCallable<Boolean>(getConnection(), getRpcControllerFactory()) { 4317 @Override 4318 protected Boolean rpcCall() throws Exception { 4319 return this.master.isRpcThrottleEnabled(getRpcController(), 4320 IsRpcThrottleEnabledRequest.newBuilder().build()).getRpcThrottleEnabled(); 4321 } 4322 }); 4323 } 4324 4325 @Override 4326 public boolean exceedThrottleQuotaSwitch(final boolean enable) throws IOException { 4327 return executeCallable(new MasterCallable<Boolean>(getConnection(), getRpcControllerFactory()) { 4328 @Override 4329 protected Boolean rpcCall() throws Exception { 4330 return this.master 4331 .switchExceedThrottleQuota(getRpcController(), 4332 MasterProtos.SwitchExceedThrottleQuotaRequest.newBuilder() 4333 .setExceedThrottleQuotaEnabled(enable).build()) 4334 .getPreviousExceedThrottleQuotaEnabled(); 4335 } 4336 }); 4337 } 4338 4339 @Override 4340 public Map<TableName, Long> getSpaceQuotaTableSizes() throws IOException { 4341 return executeCallable( 4342 new MasterCallable<Map<TableName, Long>>(getConnection(), getRpcControllerFactory()) { 4343 @Override 4344 protected Map<TableName, Long> rpcCall() throws Exception { 4345 GetSpaceQuotaRegionSizesResponse resp = master.getSpaceQuotaRegionSizes( 4346 getRpcController(), RequestConverter.buildGetSpaceQuotaRegionSizesRequest()); 4347 Map<TableName, Long> tableSizes = new HashMap<>(); 4348 for (RegionSizes sizes : resp.getSizesList()) { 4349 TableName tn = ProtobufUtil.toTableName(sizes.getTableName()); 4350 tableSizes.put(tn, sizes.getSize()); 4351 } 4352 return tableSizes; 4353 } 4354 }); 4355 } 4356 4357 @Override 4358 public Map<TableName, SpaceQuotaSnapshot> 4359 getRegionServerSpaceQuotaSnapshots(ServerName serverName) throws IOException { 4360 final AdminService.BlockingInterface admin = this.connection.getAdmin(serverName); 4361 Callable<GetSpaceQuotaSnapshotsResponse> callable = 4362 new Callable<GetSpaceQuotaSnapshotsResponse>() { 4363 @Override 4364 public GetSpaceQuotaSnapshotsResponse call() throws Exception { 4365 return admin.getSpaceQuotaSnapshots(rpcControllerFactory.newController(), 4366 RequestConverter.buildGetSpaceQuotaSnapshotsRequest()); 4367 } 4368 }; 4369 GetSpaceQuotaSnapshotsResponse resp = ProtobufUtil.call(callable); 4370 
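    // Re-key the response by TableName, converting each protobuf snapshot into the client-side
    // SpaceQuotaSnapshot representation.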
    Map<TableName, SpaceQuotaSnapshot> snapshots = new HashMap<>();
    for (TableQuotaSnapshot snapshot : resp.getSnapshotsList()) {
      snapshots.put(ProtobufUtil.toTableName(snapshot.getTableName()),
        SpaceQuotaSnapshot.toSpaceQuotaSnapshot(snapshot.getSnapshot()));
    }
    return snapshots;
  }

  @Override
  public SpaceQuotaSnapshot getCurrentSpaceQuotaSnapshot(String namespace) throws IOException {
    return executeCallable(
      new MasterCallable<SpaceQuotaSnapshot>(getConnection(), getRpcControllerFactory()) {
        @Override
        protected SpaceQuotaSnapshot rpcCall() throws Exception {
          GetQuotaStatesResponse resp = master.getQuotaStates(getRpcController(),
            RequestConverter.buildGetQuotaStatesRequest());
          for (GetQuotaStatesResponse.NamespaceQuotaSnapshot nsSnapshot : resp
            .getNsSnapshotsList()) {
            if (namespace.equals(nsSnapshot.getNamespace())) {
              return SpaceQuotaSnapshot.toSpaceQuotaSnapshot(nsSnapshot.getSnapshot());
            }
          }
          return null;
        }
      });
  }

  @Override
  public SpaceQuotaSnapshot getCurrentSpaceQuotaSnapshot(TableName tableName) throws IOException {
    return executeCallable(
      new MasterCallable<SpaceQuotaSnapshot>(getConnection(), getRpcControllerFactory()) {
        @Override
        protected SpaceQuotaSnapshot rpcCall() throws Exception {
          GetQuotaStatesResponse resp = master.getQuotaStates(getRpcController(),
            RequestConverter.buildGetQuotaStatesRequest());
          HBaseProtos.TableName protoTableName = ProtobufUtil.toProtoTableName(tableName);
          for (GetQuotaStatesResponse.TableQuotaSnapshot tableSnapshot : resp
            .getTableSnapshotsList()) {
            if (protoTableName.equals(tableSnapshot.getTableName())) {
              return SpaceQuotaSnapshot.toSpaceQuotaSnapshot(tableSnapshot.getSnapshot());
            }
          }
          return null;
        }
      });
  }

  @Override
  public void grant(UserPermission userPermission, boolean mergeExistingPermissions)
    throws IOException {
    executeCallable(new MasterCallable<Void>(getConnection(), getRpcControllerFactory()) {
      @Override
      protected Void rpcCall() throws Exception {
        GrantRequest req =
          ShadedAccessControlUtil.buildGrantRequest(userPermission, mergeExistingPermissions);
        this.master.grant(getRpcController(), req);
        return null;
      }
    });
  }

  @Override
  public void revoke(UserPermission userPermission) throws IOException {
    executeCallable(new MasterCallable<Void>(getConnection(), getRpcControllerFactory()) {
      @Override
      protected Void rpcCall() throws Exception {
        RevokeRequest req = ShadedAccessControlUtil.buildRevokeRequest(userPermission);
        this.master.revoke(getRpcController(), req);
        return null;
      }
    });
  }

  @Override
  public List<UserPermission>
    getUserPermissions(GetUserPermissionsRequest getUserPermissionsRequest) throws IOException {
    return executeCallable(
      new MasterCallable<List<UserPermission>>(getConnection(), getRpcControllerFactory()) {
        @Override
        protected List<UserPermission> rpcCall() throws Exception {
          AccessControlProtos.GetUserPermissionsRequest req =
            ShadedAccessControlUtil.buildGetUserPermissionsRequest(getUserPermissionsRequest);
          AccessControlProtos.GetUserPermissionsResponse response =
            this.master.getUserPermissions(getRpcController(), req);
          return response.getUserPermissionList().stream()
            .map(userPermission -> ShadedAccessControlUtil.toUserPermission(userPermission))
            .collect(Collectors.toList());
        }
      });
  }
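  // The single-argument async variants below are convenience overloads: they delegate to the
  // two-argument forms, passing null for the split point and the table split keys respectively.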
  @Override
  public Future<Void> splitRegionAsync(byte[] regionName) throws IOException {
    return splitRegionAsync(regionName, null);
  }

  @Override
  public Future<Void> createTableAsync(TableDescriptor desc) throws IOException {
    return createTableAsync(desc, null);
  }

  @Override
  public List<Boolean> hasUserPermissions(String userName, List<Permission> permissions)
    throws IOException {
    return executeCallable(
      new MasterCallable<List<Boolean>>(getConnection(), getRpcControllerFactory()) {
        @Override
        protected List<Boolean> rpcCall() throws Exception {
          HasUserPermissionsRequest request =
            ShadedAccessControlUtil.buildHasUserPermissionsRequest(userName, permissions);
          return this.master.hasUserPermissions(getRpcController(), request)
            .getHasUserPermissionList();
        }
      });
  }

  @Override
  public boolean snapshotCleanupSwitch(boolean on, boolean synchronous) throws IOException {
    return executeCallable(new MasterCallable<Boolean>(getConnection(), getRpcControllerFactory()) {
      @Override
      protected Boolean rpcCall() throws Exception {
        SetSnapshotCleanupRequest req =
          RequestConverter.buildSetSnapshotCleanupRequest(on, synchronous);
        return master.switchSnapshotCleanup(getRpcController(), req).getPrevSnapshotCleanup();
      }
    });
  }

  @Override
  public boolean isSnapshotCleanupEnabled() throws IOException {
    return executeCallable(new MasterCallable<Boolean>(getConnection(), getRpcControllerFactory()) {
      @Override
      protected Boolean rpcCall() throws Exception {
        IsSnapshotCleanupEnabledRequest req =
          RequestConverter.buildIsSnapshotCleanupEnabledRequest();
        return master.isSnapshotCleanupEnabled(getRpcController(), req).getEnabled();
      }
    });
  }

  private List<LogEntry> getSlowLogResponses(final Map<String, Object> filterParams,
    final Set<ServerName> serverNames, final int limit, final String logType) {
    if (CollectionUtils.isEmpty(serverNames)) {
      return Collections.emptyList();
    }
    return serverNames.stream().map(serverName -> {
      try {
        return getSlowLogResponseFromServer(serverName, filterParams, limit, logType);
      } catch (IOException e) {
        throw new RuntimeException(e);
      }
    }).flatMap(List::stream).collect(Collectors.toList());
  }

  private List<LogEntry> getSlowLogResponseFromServer(ServerName serverName,
    Map<String, Object> filterParams, int limit, String logType) throws IOException {
    AdminService.BlockingInterface admin = this.connection.getAdmin(serverName);
    return executeCallable(new RpcRetryingCallable<List<LogEntry>>() {
      @Override
      protected List<LogEntry> rpcCall(int callTimeout) throws Exception {
        HBaseRpcController controller = rpcControllerFactory.newController();
        HBaseProtos.LogRequest logRequest =
          RequestConverter.buildSlowLogResponseRequest(filterParams, limit, logType);
        HBaseProtos.LogEntry logEntry = admin.getLogEntries(controller, logRequest);
        return ProtobufUtil.toSlowLogPayloads(logEntry);
      }
    });
  }

  @Override
  public List<Boolean> clearSlowLogResponses(@Nullable final Set<ServerName> serverNames)
    throws IOException {
    if (CollectionUtils.isEmpty(serverNames)) {
      return Collections.emptyList();
    }
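    // Fan out to each named region server and clear its slow/large log buffers, collecting one
    // Boolean result per server; an IOException from any server aborts the whole call.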
    return serverNames.stream().map(serverName -> {
      try {
        return clearSlowLogsResponses(serverName);
      } catch (IOException e) {
        throw new RuntimeException(e);
      }
    }).collect(Collectors.toList());
  }

  @Override
  public List<LogEntry> getLogEntries(Set<ServerName> serverNames, String logType,
    ServerType serverType, int limit, Map<String, Object> filterParams) throws IOException {
    if (logType == null || serverType == null) {
      throw new IllegalArgumentException("logType and/or serverType cannot be null");
    }
    if (logType.equals("SLOW_LOG") || logType.equals("LARGE_LOG")) {
      if (ServerType.MASTER.equals(serverType)) {
        throw new IllegalArgumentException("Slow/Large logs are not maintained by HMaster");
      }
      return getSlowLogResponses(filterParams, serverNames, limit, logType);
    } else if (logType.equals("BALANCER_DECISION")) {
      if (ServerType.REGION_SERVER.equals(serverType)) {
        throw new IllegalArgumentException(
          "Balancer Decision logs are not maintained by HRegionServer");
      }
      return getBalancerDecisions(limit);
    } else if (logType.equals("BALANCER_REJECTION")) {
      if (ServerType.REGION_SERVER.equals(serverType)) {
        throw new IllegalArgumentException(
          "Balancer Rejection logs are not maintained by HRegionServer");
      }
      return getBalancerRejections(limit);
    }
    return Collections.emptyList();
  }

  @Override
  public void flushMasterStore() throws IOException {
    executeCallable(new MasterCallable<Void>(getConnection(), getRpcControllerFactory()) {
      @Override
      protected Void rpcCall() throws Exception {
        FlushMasterStoreRequest request = FlushMasterStoreRequest.newBuilder().build();
        master.flushMasterStore(getRpcController(), request);
        return null;
      }
    });
  }

  private List<LogEntry> getBalancerDecisions(final int limit) throws IOException {
    return executeCallable(
      new MasterCallable<List<LogEntry>>(getConnection(), getRpcControllerFactory()) {
        @Override
        protected List<LogEntry> rpcCall() throws Exception {
          HBaseProtos.LogEntry logEntry =
            master.getLogEntries(getRpcController(), ProtobufUtil.toBalancerDecisionRequest(limit));
          return ProtobufUtil.toBalancerDecisionResponse(logEntry);
        }
      });
  }

  private List<LogEntry> getBalancerRejections(final int limit) throws IOException {
    return executeCallable(
      new MasterCallable<List<LogEntry>>(getConnection(), getRpcControllerFactory()) {
        @Override
        protected List<LogEntry> rpcCall() throws Exception {
          HBaseProtos.LogEntry logEntry = master.getLogEntries(getRpcController(),
            ProtobufUtil.toBalancerRejectionRequest(limit));
          return ProtobufUtil.toBalancerRejectionResponse(logEntry);
        }
      });
  }

  private Boolean clearSlowLogsResponses(final ServerName serverName) throws IOException {
    AdminService.BlockingInterface admin = this.connection.getAdmin(serverName);
    return executeCallable(new RpcRetryingCallable<Boolean>() {
      @Override
      protected Boolean rpcCall(int callTimeout) throws Exception {
        HBaseRpcController controller = rpcControllerFactory.newController();
        AdminProtos.ClearSlowLogResponses clearSlowLogResponses = admin
          .clearSlowLogsResponses(controller, RequestConverter.buildClearSlowLogResponseRequest());
        return ProtobufUtil.toClearSlowLogPayload(clearSlowLogResponses);
      }
    });
  }
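  /*
   * Illustrative usage sketch (assumes a caller holding a connected Admin instance named
   * "admin"): to quiesce replication peer changes before maintenance, a caller might invoke
   *   boolean wasEnabled = admin.replicationPeerModificationSwitch(false, true);
   * which disables new peer modifications and, because drainProcedures is true, blocks until all
   * in-flight peer modification procedures have finished.
   */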
  @Override
  public boolean replicationPeerModificationSwitch(boolean on, boolean drainProcedures)
    throws IOException {
    ReplicationPeerModificationSwitchRequest request =
      ReplicationPeerModificationSwitchRequest.newBuilder().setOn(on).build();
    boolean prevOn =
      executeCallable(new MasterCallable<Boolean>(getConnection(), getRpcControllerFactory()) {
        @Override
        protected Boolean rpcCall() throws Exception {
          return master.replicationPeerModificationSwitch(getRpcController(), request)
            .getPreviousValue();
        }
      });
    // If we do not need to wait for previous peer modification procedures to finish, or we are
    // enabling peer modification, just return here.
    if (!drainProcedures || on) {
      return prevOn;
    }
    // Otherwise, wait until all previous peer modification procedures are done.
    for (int retry = 0;; retry++) {
      List<ProcedureProtos.Procedure> procs =
        executeCallable(new MasterCallable<List<ProcedureProtos.Procedure>>(getConnection(),
          getRpcControllerFactory()) {
          @Override
          protected List<ProcedureProtos.Procedure> rpcCall() throws Exception {
            return master
              .getReplicationPeerModificationProcedures(getRpcController(),
                GetReplicationPeerModificationProceduresRequest.getDefaultInstance())
              .getProcedureList();
          }
        });
      if (procs.isEmpty()) {
        return prevOn;
      }
      try {
        Thread.sleep(ConnectionUtils.getPauseTime(pause, retry));
      } catch (InterruptedException e) {
        throw (IOException) new InterruptedIOException().initCause(e);
      }
    }
  }

  @Override
  public boolean isReplicationPeerModificationEnabled() throws IOException {
    return executeCallable(new MasterCallable<Boolean>(getConnection(), getRpcControllerFactory()) {
      @Override
      protected Boolean rpcCall() throws Exception {
        return master.isReplicationPeerModificationEnabled(getRpcController(),
          IsReplicationPeerModificationEnabledRequest.getDefaultInstance()).getEnabled();
      }
    });
  }
}