/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.client;

import java.io.IOException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collection;
import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Objects;
import java.util.Optional;
import java.util.Set;
import java.util.TreeMap;
import java.util.TreeSet;
import java.util.function.Function;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.Coprocessor;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.exceptions.DeserializationException;
import org.apache.hadoop.hbase.exceptions.HBaseException;
import org.apache.hadoop.hbase.rsgroup.RSGroupInfo;
import org.apache.hadoop.hbase.security.User;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.PrettyPrinter;
import org.apache.yetus.audience.InterfaceAudience;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos;

/**
 * Convenience class for composing an instance of {@link TableDescriptor}.
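 * <p>
 * A minimal usage sketch (illustrative only; the table name, family name and size are arbitrary):
 * </p>
 * <pre>
 * TableDescriptor htd = TableDescriptorBuilder.newBuilder(TableName.valueOf("testTable"))
 *   .setColumnFamily(ColumnFamilyDescriptorBuilder.of("cf"))
 *   .setMaxFileSize(10L * 1024 * 1024 * 1024)
 *   .build();
 * </pre>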
 * @since 2.0.0
 */
@InterfaceAudience.Public
public class TableDescriptorBuilder {
  public static final Logger LOG = LoggerFactory.getLogger(TableDescriptorBuilder.class);
  @InterfaceAudience.Private
  public static final String SPLIT_POLICY = "SPLIT_POLICY";
  private static final Bytes SPLIT_POLICY_KEY = new Bytes(Bytes.toBytes(SPLIT_POLICY));
  /**
   * Used by HBase Shell interface to access this metadata attribute which denotes the maximum size
   * of the store file after which a region split occurs.
   */
  @InterfaceAudience.Private
  public static final String MAX_FILESIZE = "MAX_FILESIZE";
  private static final Bytes MAX_FILESIZE_KEY = new Bytes(Bytes.toBytes(MAX_FILESIZE));

  @InterfaceAudience.Private
  public static final String OWNER = "OWNER";
  @InterfaceAudience.Private
  public static final Bytes OWNER_KEY = new Bytes(Bytes.toBytes(OWNER));

  /**
   * Used by the REST interface to access this metadata attribute which denotes if the table is
   * read-only.
   */
  @InterfaceAudience.Private
  public static final String READONLY = "READONLY";
  private static final Bytes READONLY_KEY = new Bytes(Bytes.toBytes(READONLY));

  /**
   * Used by HBase Shell interface to access this metadata attribute which denotes if the table is
   * compaction enabled.
   */
  @InterfaceAudience.Private
  public static final String COMPACTION_ENABLED = "COMPACTION_ENABLED";
  private static final Bytes COMPACTION_ENABLED_KEY = new Bytes(Bytes.toBytes(COMPACTION_ENABLED));

  /**
   * Used by HBase Shell interface to access this metadata attribute which denotes if the table is
   * split enabled.
   */
  @InterfaceAudience.Private
  public static final String SPLIT_ENABLED = "SPLIT_ENABLED";
  private static final Bytes SPLIT_ENABLED_KEY = new Bytes(Bytes.toBytes(SPLIT_ENABLED));

  /**
   * Used by HBase Shell interface to access this metadata attribute which denotes if the table is
   * merge enabled.
   */
  @InterfaceAudience.Private
  public static final String MERGE_ENABLED = "MERGE_ENABLED";
  private static final Bytes MERGE_ENABLED_KEY = new Bytes(Bytes.toBytes(MERGE_ENABLED));

  /**
   * Used by HBase Shell interface to access this metadata attribute which represents the maximum
   * size of the memstore after which its contents are flushed onto the disk.
   */
  @InterfaceAudience.Private
  public static final String MEMSTORE_FLUSHSIZE = "MEMSTORE_FLUSHSIZE";
  private static final Bytes MEMSTORE_FLUSHSIZE_KEY = new Bytes(Bytes.toBytes(MEMSTORE_FLUSHSIZE));

  @InterfaceAudience.Private
  public static final String FLUSH_POLICY = "FLUSH_POLICY";
  private static final Bytes FLUSH_POLICY_KEY = new Bytes(Bytes.toBytes(FLUSH_POLICY));
  /**
   * Used by the REST interface to access this metadata attribute which denotes if the table is a
   * catalog table, i.e. <code>hbase:meta</code>.
   */
  @InterfaceAudience.Private
  public static final String IS_META = "IS_META";
  private static final Bytes IS_META_KEY = new Bytes(Bytes.toBytes(IS_META));

  /**
   * {@link Durability} setting for the table.
   */
  @InterfaceAudience.Private
  public static final String DURABILITY = "DURABILITY";
  private static final Bytes DURABILITY_KEY = new Bytes(Bytes.toBytes(DURABILITY));

  /**
   * The number of region replicas for the table.
   */
  @InterfaceAudience.Private
  public static final String REGION_REPLICATION = "REGION_REPLICATION";
  private static final Bytes REGION_REPLICATION_KEY = new Bytes(Bytes.toBytes(REGION_REPLICATION));

  /**
   * The flag to indicate whether or not the memstore should be replicated for read-replicas
   * (CONSISTENCY => TIMELINE).
   */
  @InterfaceAudience.Private
  public static final String REGION_MEMSTORE_REPLICATION = "REGION_MEMSTORE_REPLICATION";
  private static final Bytes REGION_MEMSTORE_REPLICATION_KEY =
    new Bytes(Bytes.toBytes(REGION_MEMSTORE_REPLICATION));

  /**
   * If non-null, the HDFS erasure coding policy to set on the data dir of the table
   */
  public static final String ERASURE_CODING_POLICY = "ERASURE_CODING_POLICY";
  private static final Bytes ERASURE_CODING_POLICY_KEY =
    new Bytes(Bytes.toBytes(ERASURE_CODING_POLICY));

  private static final String DEFAULT_ERASURE_CODING_POLICY = null;
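
  // Illustrative sketch (not taken from this file): "RS-6-3-1024k" is a standard
  // HDFS erasure coding policy name, and the policy must already be enabled on
  // the HDFS cluster before it can be set on a table:
  //
  //   builder.setErasureCodingPolicy("RS-6-3-1024k"); // passing null disables EC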

  /**
   * Used by shell/rest interface to access this metadata attribute which denotes if the table
   * should be treated by the region normalizer.
   */
  @InterfaceAudience.Private
  public static final String NORMALIZATION_ENABLED = "NORMALIZATION_ENABLED";
  private static final Bytes NORMALIZATION_ENABLED_KEY =
    new Bytes(Bytes.toBytes(NORMALIZATION_ENABLED));
  @InterfaceAudience.Private
  @Deprecated
  public static final boolean DEFAULT_NORMALIZATION_ENABLED = false;

  @InterfaceAudience.Private
  public static final String NORMALIZER_TARGET_REGION_COUNT = "NORMALIZER_TARGET_REGION_COUNT";
  private static final Bytes NORMALIZER_TARGET_REGION_COUNT_KEY =
    new Bytes(Bytes.toBytes(NORMALIZER_TARGET_REGION_COUNT));

  @InterfaceAudience.Private
  public static final String NORMALIZER_TARGET_REGION_SIZE_MB = "NORMALIZER_TARGET_REGION_SIZE_MB";
  private static final Bytes NORMALIZER_TARGET_REGION_SIZE_MB_KEY =
    new Bytes(Bytes.toBytes(NORMALIZER_TARGET_REGION_SIZE_MB));
  // TODO: Keeping backward compatibility with the HBASE-25651 change. Can be removed in a later
  // version.
  @InterfaceAudience.Private
  @Deprecated
  public static final String NORMALIZER_TARGET_REGION_SIZE = "NORMALIZER_TARGET_REGION_SIZE";
  @Deprecated
  private static final Bytes NORMALIZER_TARGET_REGION_SIZE_KEY =
    new Bytes(Bytes.toBytes(NORMALIZER_TARGET_REGION_SIZE));

  /**
   * Default durability for HTD is USE_DEFAULT, which defaults to the HBase-global default value.
   */
  private static final Durability DEFAULT_DURABILITY = Durability.USE_DEFAULT;

  @InterfaceAudience.Private
  public static final String PRIORITY = "PRIORITY";
  private static final Bytes PRIORITY_KEY = new Bytes(Bytes.toBytes(PRIORITY));

  private static final Bytes RSGROUP_KEY =
    new Bytes(Bytes.toBytes(RSGroupInfo.TABLE_DESC_PROP_GROUP));

  /**
   * Relative priority of the table used for rpc scheduling
   */
  private static final int DEFAULT_PRIORITY = HConstants.NORMAL_QOS;

  /**
   * Constant that denotes whether the table is READONLY by default and is false
   */
  public static final boolean DEFAULT_READONLY = false;

  /**
   * Constant that denotes whether the table is compaction enabled by default
   */
  public static final boolean DEFAULT_COMPACTION_ENABLED = true;

  /**
   * Constant that denotes whether the table is split enabled by default
   */
  public static final boolean DEFAULT_SPLIT_ENABLED = true;

  /**
   * Constant that denotes whether the table is merge enabled by default
   */
  public static final boolean DEFAULT_MERGE_ENABLED = true;

  /**
   * Constant that denotes the maximum default size of the memstore in bytes after which the
   * contents are flushed to the store files.
   */
  public static final long DEFAULT_MEMSTORE_FLUSH_SIZE = 1024 * 1024 * 128L;

  public static final int DEFAULT_REGION_REPLICATION = 1;

  public static final boolean DEFAULT_REGION_MEMSTORE_REPLICATION = true;

  private final static Map<String, String> DEFAULT_VALUES = new HashMap<>();
  private final static Set<Bytes> RESERVED_KEYWORDS = new HashSet<>();

  static {
    DEFAULT_VALUES.put(MAX_FILESIZE, String.valueOf(HConstants.DEFAULT_MAX_FILE_SIZE));
    DEFAULT_VALUES.put(READONLY, String.valueOf(DEFAULT_READONLY));
    DEFAULT_VALUES.put(MEMSTORE_FLUSHSIZE, String.valueOf(DEFAULT_MEMSTORE_FLUSH_SIZE));
    DEFAULT_VALUES.put(DURABILITY, DEFAULT_DURABILITY.name()); // use the enum name
    DEFAULT_VALUES.put(REGION_REPLICATION, String.valueOf(DEFAULT_REGION_REPLICATION));
    DEFAULT_VALUES.put(PRIORITY, String.valueOf(DEFAULT_PRIORITY));
    // ERASURE_CODING_POLICY defaults to null; registering the "null" string here keeps the
    // default from being treated as user metadata.
    DEFAULT_VALUES.put(ERASURE_CODING_POLICY, String.valueOf(DEFAULT_ERASURE_CODING_POLICY));
    DEFAULT_VALUES.keySet().stream().map(s -> new Bytes(Bytes.toBytes(s)))
      .forEach(RESERVED_KEYWORDS::add);
    RESERVED_KEYWORDS.add(IS_META_KEY);
  }

  public static PrettyPrinter.Unit getUnit(String key) {
    switch (key) {
      case MAX_FILESIZE:
      case MEMSTORE_FLUSHSIZE:
        return PrettyPrinter.Unit.BYTE;
      default:
        return PrettyPrinter.Unit.NONE;
    }
  }

  @InterfaceAudience.Private
  public final static String NAMESPACE_FAMILY_INFO = "info";
  @InterfaceAudience.Private
  public final static byte[] NAMESPACE_FAMILY_INFO_BYTES = Bytes.toBytes(NAMESPACE_FAMILY_INFO);
  @InterfaceAudience.Private
  public final static byte[] NAMESPACE_COL_DESC_BYTES = Bytes.toBytes("d");

  /**
   * <pre>
   * Pattern that matches a coprocessor specification. Form is:
   * {@code <coprocessor jar file location> '|' <class name> ['|' <priority> ['|' <arguments>]]}
   * where arguments are {@code <KEY> '=' <VALUE> [,...]}
   * For example: {@code hdfs:///foo.jar|com.foo.FooRegionObserver|1001|arg1=1,arg2=2}
   * </pre>
   */
  private static final Pattern CP_HTD_ATTR_VALUE_PATTERN =
    Pattern.compile("(^[^\\|]*)\\|([^\\|]+)\\|[\\s]*([\\d]*)[\\s]*(\\|.*)?$");

  private static final String CP_HTD_ATTR_VALUE_PARAM_KEY_PATTERN = "[^=,]+";
  private static final String CP_HTD_ATTR_VALUE_PARAM_VALUE_PATTERN = "[^,]+";
  private static final Pattern CP_HTD_ATTR_VALUE_PARAM_PATTERN = Pattern.compile("("
    + CP_HTD_ATTR_VALUE_PARAM_KEY_PATTERN + ")=(" + CP_HTD_ATTR_VALUE_PARAM_VALUE_PATTERN + "),?");
  private static final Pattern CP_HTD_ATTR_KEY_PATTERN =
    Pattern.compile("^coprocessor\\$([0-9]+)$", Pattern.CASE_INSENSITIVE);
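
  // Illustrative sketch: coprocessor attributes stored in a descriptor use keys
  // matching CP_HTD_ATTR_KEY_PATTERN and values matching CP_HTD_ATTR_VALUE_PATTERN,
  // e.g. (spec string taken from the pattern javadoc above):
  //
  //   coprocessor$1 => hdfs:///foo.jar|com.foo.FooRegionObserver|1001|arg1=1,arg2=2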

  /**
   * Table descriptor for the namespace table
   */
  // TODO: We used to set CacheDataInL1 for the NS table. When we have BucketCache in file mode,
  // the NS data goes to file-mode BC only. Test how that affects the system. If too much, we have
  // to rethink adding back setCacheDataInL1 for the NS table.
  // Note: namespace schema is hard-coded. In hbase3, namespace goes away; it is integrated into
  // hbase:meta.
  public static final TableDescriptor NAMESPACE_TABLEDESC =
    TableDescriptorBuilder.newBuilder(TableName.NAMESPACE_TABLE_NAME)
      .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(NAMESPACE_FAMILY_INFO_BYTES)
        // Ten is an arbitrary number. Keep versions to help debugging.
        .setMaxVersions(10).setInMemory(true).setBlocksize(8 * 1024)
        .setScope(HConstants.REPLICATION_SCOPE_LOCAL).build())
      .build();

  private final ModifyableTableDescriptor desc;

  /** Returns the given TableDescriptor serialized with pb, prefixed with the pb magic. */
  public static byte[] toByteArray(TableDescriptor desc) {
    if (desc instanceof ModifyableTableDescriptor) {
      return ((ModifyableTableDescriptor) desc).toByteArray();
    }
    return new ModifyableTableDescriptor(desc).toByteArray();
  }

  /**
   * The input should be created by {@link #toByteArray}.
   * @param pbBytes A pb serialized TableDescriptor instance with pb magic prefix
   * @return A TableDescriptor instance deserialized from the passed-in bytes
   * @throws org.apache.hadoop.hbase.exceptions.DeserializationException if an error occurred
   */
  public static TableDescriptor parseFrom(byte[] pbBytes) throws DeserializationException {
    return ModifyableTableDescriptor.parseFrom(pbBytes);
  }
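
  // Round-trip sketch (illustrative): toByteArray and parseFrom are inverses over
  // the pb-magic-prefixed encoding, so for any descriptor td:
  //
  //   byte[] pb = TableDescriptorBuilder.toByteArray(td);
  //   TableDescriptor copy = TableDescriptorBuilder.parseFrom(pb);
  //   // TableDescriptor.COMPARATOR.compare(td, copy) == 0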

  public static TableDescriptorBuilder newBuilder(final TableName name) {
    return new TableDescriptorBuilder(name);
  }

  public static TableDescriptor copy(TableDescriptor desc) {
    return new ModifyableTableDescriptor(desc);
  }

  public static TableDescriptor copy(TableName name, TableDescriptor desc) {
    return new ModifyableTableDescriptor(name, desc);
  }

  /**
   * Copy all values, families, and name from the input.
   * @param desc The descriptor to copy
   * @return A clone of input
   */
  public static TableDescriptorBuilder newBuilder(final TableDescriptor desc) {
    return new TableDescriptorBuilder(desc);
  }

  private TableDescriptorBuilder(final TableName name) {
    this.desc = new ModifyableTableDescriptor(name);
  }

  private TableDescriptorBuilder(final TableDescriptor desc) {
    this.desc = new ModifyableTableDescriptor(desc);
  }

  /**
   * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0. Use
   *             {@link #setCoprocessor(String)} instead
   */
  @Deprecated
  public TableDescriptorBuilder addCoprocessor(String className) throws IOException {
    return addCoprocessor(className, null, Coprocessor.PRIORITY_USER, null);
  }

  /**
   * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0. Use
   *             {@link #setCoprocessor(CoprocessorDescriptor)} instead
   */
  @Deprecated
  public TableDescriptorBuilder addCoprocessor(String className, Path jarFilePath, int priority,
    final Map<String, String> kvs) throws IOException {
    desc.setCoprocessor(CoprocessorDescriptorBuilder.newBuilder(className)
      .setJarPath(jarFilePath == null ? null : jarFilePath.toString()).setPriority(priority)
      .setProperties(kvs == null ? Collections.emptyMap() : kvs).build());
    return this;
  }

  /**
   * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0. Use
   *             {@link #setCoprocessor(CoprocessorDescriptor)} instead
   */
  @Deprecated
  public TableDescriptorBuilder addCoprocessorWithSpec(final String specStr) throws IOException {
    desc.setCoprocessorWithSpec(specStr);
    return this;
  }

  /**
   * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0. Use
   *             {@link #setColumnFamily(ColumnFamilyDescriptor)} instead
   */
  @Deprecated
  public TableDescriptorBuilder addColumnFamily(final ColumnFamilyDescriptor family) {
    desc.setColumnFamily(family);
    return this;
  }

  public TableDescriptorBuilder setCoprocessor(String className) throws IOException {
    return setCoprocessor(CoprocessorDescriptorBuilder.of(className));
  }

  public TableDescriptorBuilder setCoprocessor(CoprocessorDescriptor cpDesc) throws IOException {
    desc.setCoprocessor(Objects.requireNonNull(cpDesc));
    return this;
  }

  public TableDescriptorBuilder setCoprocessors(Collection<CoprocessorDescriptor> cpDescs)
    throws IOException {
    for (CoprocessorDescriptor cpDesc : cpDescs) {
      desc.setCoprocessor(cpDesc);
    }
    return this;
  }

  public TableDescriptorBuilder setColumnFamily(final ColumnFamilyDescriptor family) {
    desc.setColumnFamily(Objects.requireNonNull(family));
    return this;
  }

  public TableDescriptorBuilder
    setColumnFamilies(final Collection<ColumnFamilyDescriptor> families) {
    families.forEach(desc::setColumnFamily);
    return this;
  }

  public TableDescriptorBuilder modifyColumnFamily(final ColumnFamilyDescriptor family) {
    desc.modifyColumnFamily(Objects.requireNonNull(family));
    return this;
  }

  public TableDescriptorBuilder removeValue(final String key) {
    desc.removeValue(key);
    return this;
  }

  public TableDescriptorBuilder removeValue(Bytes key) {
    desc.removeValue(key);
    return this;
  }

  public TableDescriptorBuilder removeValue(byte[] key) {
    desc.removeValue(key);
    return this;
  }

  public TableDescriptorBuilder removeColumnFamily(final byte[] name) {
    desc.removeColumnFamily(name);
    return this;
  }

  public TableDescriptorBuilder removeCoprocessor(String className) {
    desc.removeCoprocessor(className);
    return this;
  }

  public TableDescriptorBuilder setCompactionEnabled(final boolean isEnable) {
    desc.setCompactionEnabled(isEnable);
    return this;
  }

  public TableDescriptorBuilder setSplitEnabled(final boolean isEnable) {
    desc.setSplitEnabled(isEnable);
    return this;
  }

  public TableDescriptorBuilder setMergeEnabled(final boolean isEnable) {
    desc.setMergeEnabled(isEnable);
    return this;
  }

  public TableDescriptorBuilder setDurability(Durability durability) {
    desc.setDurability(durability);
    return this;
  }

  public TableDescriptorBuilder setFlushPolicyClassName(String clazz) {
    desc.setFlushPolicyClassName(clazz);
    return this;
  }

  public TableDescriptorBuilder setMaxFileSize(long maxFileSize) {
    desc.setMaxFileSize(maxFileSize);
    return this;
  }

  public TableDescriptorBuilder setMaxFileSize(String maxFileSize) throws HBaseException {
    desc.setMaxFileSize(maxFileSize);
    return this;
  }

  public TableDescriptorBuilder setMemStoreFlushSize(long memstoreFlushSize) {
    desc.setMemStoreFlushSize(memstoreFlushSize);
    return this;
  }

  public TableDescriptorBuilder setMemStoreFlushSize(String memStoreFlushSize)
    throws HBaseException {
    desc.setMemStoreFlushSize(memStoreFlushSize);
    return this;
  }

  public TableDescriptorBuilder setNormalizerTargetRegionCount(final int regionCount) {
    desc.setNormalizerTargetRegionCount(regionCount);
    return this;
  }
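
  // Sketch: the String overloads above parse human-readable values through
  // PrettyPrinter with Unit.BYTE; the exact accepted spelling is defined by
  // PrettyPrinter, so the literal below is an assumption for illustration:
  //
  //   builder.setMemStoreFlushSize("128 MB"); // same effect as 134217728L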

  public TableDescriptorBuilder setNormalizerTargetRegionSize(final long regionSize) {
    desc.setNormalizerTargetRegionSize(regionSize);
    return this;
  }

  public TableDescriptorBuilder setNormalizationEnabled(final boolean isEnable) {
    desc.setNormalizationEnabled(isEnable);
    return this;
  }

  /**
   * Set the table owner
   * @deprecated since 2.0.0 and will be removed in 3.0.0.
   * @see <a href="https://issues.apache.org/jira/browse/HBASE-15583">HBASE-15583</a>
   */
  @Deprecated
  public TableDescriptorBuilder setOwner(User owner) {
    desc.setOwner(owner);
    return this;
  }

  /**
   * Set the table owner
   * @deprecated since 2.0.0 and will be removed in 3.0.0.
   * @see <a href="https://issues.apache.org/jira/browse/HBASE-15583">HBASE-15583</a>
   */
  @Deprecated
  public TableDescriptorBuilder setOwnerString(String ownerString) {
    desc.setOwnerString(ownerString);
    return this;
  }

  public TableDescriptorBuilder setPriority(int priority) {
    desc.setPriority(priority);
    return this;
  }

  public TableDescriptorBuilder setReadOnly(final boolean readOnly) {
    desc.setReadOnly(readOnly);
    return this;
  }

  public TableDescriptorBuilder setErasureCodingPolicy(String policy) {
    desc.setErasureCodingPolicy(policy);
    return this;
  }

  public TableDescriptorBuilder setRegionMemStoreReplication(boolean memstoreReplication) {
    desc.setRegionMemStoreReplication(memstoreReplication);
    return this;
  }

  public TableDescriptorBuilder setRegionReplication(int regionReplication) {
    desc.setRegionReplication(regionReplication);
    return this;
  }

  public TableDescriptorBuilder setRegionSplitPolicyClassName(String clazz) {
    desc.setRegionSplitPolicyClassName(clazz);
    return this;
  }

  public TableDescriptorBuilder setValue(final String key, final String value) {
    desc.setValue(key, value);
    return this;
  }

  public TableDescriptorBuilder setValue(final Bytes key, final Bytes value) {
    desc.setValue(key, value);
    return this;
  }

  public TableDescriptorBuilder setValue(final byte[] key, final byte[] value) {
    desc.setValue(key, value);
    return this;
  }

  public String getValue(String key) {
    return desc.getValue(key);
  }

  /**
   * Sets the replication scope for all and only the column families already present in the
   * builder. Columns added later won't be backfilled with this replication scope.
   * @param scope replication scope
   * @return a TableDescriptorBuilder
   */
  public TableDescriptorBuilder setReplicationScope(int scope) {
    Map<byte[], ColumnFamilyDescriptor> newFamilies = new TreeMap<>(Bytes.BYTES_RAWCOMPARATOR);
    newFamilies.putAll(desc.families);
    newFamilies.forEach((cf, cfDesc) -> {
      desc.removeColumnFamily(cf);
      desc
        .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(cfDesc).setScope(scope).build());
    });
    return this;
  }
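
  // Sketch of the ordering caveat documented above: only families present when
  // setReplicationScope is called are rewritten (cfA/cfB are illustrative names):
  //
  //   builder.setColumnFamily(cfA)
  //     .setReplicationScope(HConstants.REPLICATION_SCOPE_GLOBAL)
  //     .setColumnFamily(cfB); // cfB keeps its own scope, not GLOBAL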

  /**
   * Set the RSGroup for this table; the specified RSGroup must already exist when the table is
   * created or modified.
   * @param group rsgroup name
   * @return a TableDescriptorBuilder
   */
  public TableDescriptorBuilder setRegionServerGroup(String group) {
    desc.setValue(RSGROUP_KEY, group);
    return this;
  }

  public TableDescriptor build() {
    return new ModifyableTableDescriptor(desc);
  }

  /**
   * TODO: make this private after removing the HTableDescriptor
   */
  @InterfaceAudience.Private
  public static class ModifyableTableDescriptor
    implements TableDescriptor, Comparable<ModifyableTableDescriptor> {

    private final TableName name;

    /**
     * A map which holds the metadata information of the table. This metadata includes values like
     * IS_META, SPLIT_POLICY, MAX_FILE_SIZE, READONLY, MEMSTORE_FLUSHSIZE etc...
     */
    private final Map<Bytes, Bytes> values = new HashMap<>();

    /**
     * Maps column family name to the respective FamilyDescriptors
     */
    private final Map<byte[], ColumnFamilyDescriptor> families =
      new TreeMap<>(Bytes.BYTES_RAWCOMPARATOR);

    /**
     * Construct a table descriptor specifying a TableName object
     * @param name Table name. TODO: make this private after removing the HTableDescriptor
     */
    @InterfaceAudience.Private
    public ModifyableTableDescriptor(final TableName name) {
      this(name, Collections.emptyList(), Collections.emptyMap());
    }

    private ModifyableTableDescriptor(final TableDescriptor desc) {
      this(desc.getTableName(), Arrays.asList(desc.getColumnFamilies()), desc.getValues());
    }

    /**
     * Construct a table descriptor by cloning the descriptor passed as a parameter.
     * <p>
     * Makes a deep copy of the supplied descriptor.
     * @param name The new name
     * @param desc The descriptor. TODO: make this private after removing the HTableDescriptor
     */
    @InterfaceAudience.Private
    @Deprecated // only used by HTableDescriptor. remove this method if HTD is removed
    public ModifyableTableDescriptor(final TableName name, final TableDescriptor desc) {
      this(name, Arrays.asList(desc.getColumnFamilies()), desc.getValues());
    }

    private ModifyableTableDescriptor(final TableName name,
      final Collection<ColumnFamilyDescriptor> families, Map<Bytes, Bytes> values) {
      this.name = name;
      families.forEach(c -> this.families.put(c.getName(), ColumnFamilyDescriptorBuilder.copy(c)));
      this.values.putAll(values);
      this.values.put(IS_META_KEY,
        new Bytes(Bytes.toBytes(Boolean.toString(name.equals(TableName.META_TABLE_NAME)))));
    }

    /**
     * Checks if this table is the <code>hbase:meta</code> region.
     * @return true if this table is the <code>hbase:meta</code> region
     */
    @Override
    public boolean isMetaRegion() {
      return getOrDefault(IS_META_KEY, Boolean::valueOf, false);
    }

    /**
     * Checks if the table is a <code>hbase:meta</code> table
     * @return true if table is <code>hbase:meta</code> region.
     */
    @Override
    public boolean isMetaTable() {
      return isMetaRegion();
    }

    @Override
    public Bytes getValue(Bytes key) {
      Bytes rval = values.get(key);
      return rval == null ? null : new Bytes(rval.copyBytes());
    }
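
    // Sketch: the getValue variants here return defensive copies (copyBytes), so
    // mutating a returned value never changes this descriptor:
    //
    //   byte[] v = td.getValue(key);
    //   v[0] = 0; // td's stored value is unaffected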

    @Override
    public String getValue(String key) {
      Bytes rval = values.get(new Bytes(Bytes.toBytes(key)));
      return rval == null ? null : Bytes.toString(rval.get(), rval.getOffset(), rval.getLength());
    }

    @Override
    public byte[] getValue(byte[] key) {
      Bytes value = values.get(new Bytes(key));
      return value == null ? null : value.copyBytes();
    }

    private <T> T getOrDefault(Bytes key, Function<String, T> function, T defaultValue) {
      Bytes value = values.get(key);
      if (value == null) {
        return defaultValue;
      } else {
        return function.apply(Bytes.toString(value.get(), value.getOffset(), value.getLength()));
      }
    }

    /**
     * Getter for fetching an unmodifiable {@link #values} map.
     * @return unmodifiable map {@link #values}.
     * @see #values
     */
    @Override
    public Map<Bytes, Bytes> getValues() {
      // shallow pointer copy
      return Collections.unmodifiableMap(values);
    }

    /**
     * Setter for storing metadata as a (key, value) pair in {@link #values} map
     * @param key   The key.
     * @param value The value. If null, removes the setting.
     * @return the modifyable TD
     * @see #values
     */
    public ModifyableTableDescriptor setValue(byte[] key, byte[] value) {
      return setValue(toBytesOrNull(key, v -> v), toBytesOrNull(value, v -> v));
    }

    public ModifyableTableDescriptor setValue(String key, String value) {
      return setValue(toBytesOrNull(key, Bytes::toBytes), toBytesOrNull(value, Bytes::toBytes));
    }

    /**
     * @param key   The key.
     * @param value The value. If null, removes the setting.
     */
    private ModifyableTableDescriptor setValue(final Bytes key, final String value) {
      return setValue(key, toBytesOrNull(value, Bytes::toBytes));
    }

    /**
     * Setter for storing metadata as a (key, value) pair in {@link #values} map
     * @param key   The key.
     * @param value The value. If null, removes the setting.
     */
    public ModifyableTableDescriptor setValue(final Bytes key, final Bytes value) {
      if (value == null || value.getLength() == 0) {
        values.remove(key);
      } else {
        values.put(key, value);
      }
      return this;
    }

    private static <T> Bytes toBytesOrNull(T t, Function<T, byte[]> f) {
      if (t == null) {
        return null;
      } else {
        return new Bytes(f.apply(t));
      }
    }

    /**
     * Remove metadata represented by the key from the {@link #values} map
     * @param key Key whose key and value we're to remove from TableDescriptor parameters.
     * @return the modifyable TD
     */
    public ModifyableTableDescriptor removeValue(final String key) {
      return setValue(key, (String) null);
    }

    /**
     * Remove metadata represented by the key from the {@link #values} map
     * @param key Key whose key and value we're to remove from TableDescriptor parameters.
     * @return the modifyable TD
     */
    public ModifyableTableDescriptor removeValue(Bytes key) {
      return setValue(key, (Bytes) null);
    }

    /**
     * Remove metadata represented by the key from the {@link #values} map
     * @param key Key whose key and value we're to remove from TableDescriptor parameters.
     * @return the modifyable TD
     */
    public ModifyableTableDescriptor removeValue(final byte[] key) {
      return removeValue(new Bytes(key));
    }
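
    // Sketch: since a null (or empty) value removes the mapping, removeValue is
    // equivalent to a null-valued setValue call:
    //
    //   td.setValue(key, (Bytes) null); // same as td.removeValue(key)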

    /**
     * Check if the readOnly flag of the table is set. If the readOnly flag is set then the
     * contents of the table can only be read from but not modified.
     * @return true if all columns in the table should be read only
     */
    @Override
    public boolean isReadOnly() {
      return getOrDefault(READONLY_KEY, Boolean::valueOf, DEFAULT_READONLY);
    }

    /**
     * Setting the table as read only sets all the columns in the table as read only. By default
     * all tables are modifiable, but if the readOnly flag is set to true then the contents of the
     * table can only be read but not modified.
     * @param readOnly True if all of the columns in the table should be read only.
     * @return the modifyable TD
     */
    public ModifyableTableDescriptor setReadOnly(final boolean readOnly) {
      return setValue(READONLY_KEY, Boolean.toString(readOnly));
    }

    /**
     * The HDFS erasure coding policy for a table. This will be set on the data dir of the table,
     * and is an alternative to normal replication which takes less space at the cost of locality.
     * @return the current policy, or null if undefined
     */
    @Override
    public String getErasureCodingPolicy() {
      return getValue(ERASURE_CODING_POLICY);
    }

    /**
     * Sets the HDFS erasure coding policy for the table. This will be propagated to HDFS for the
     * data dir of the table. Erasure coding is an alternative to normal replication which takes
     * less space at the cost of locality. The policy must be available and enabled on the hdfs
     * cluster before being set.
     * @param policy the policy to set, or null to disable erasure coding
     * @return the modifyable TD
     */
    public ModifyableTableDescriptor setErasureCodingPolicy(String policy) {
      return setValue(ERASURE_CODING_POLICY_KEY, policy);
    }

    /**
     * Check if the compaction enable flag of the table is true. If the flag is false then no
     * minor/major compactions will actually be performed.
     * @return true if table compaction enabled
     */
    @Override
    public boolean isCompactionEnabled() {
      return getOrDefault(COMPACTION_ENABLED_KEY, Boolean::valueOf, DEFAULT_COMPACTION_ENABLED);
    }

    /**
     * Setting the table compaction enable flag.
     * @param isEnable True if enable compaction.
     * @return the modifyable TD
     */
    public ModifyableTableDescriptor setCompactionEnabled(final boolean isEnable) {
      return setValue(COMPACTION_ENABLED_KEY, Boolean.toString(isEnable));
    }

    /**
     * Check if the split enable flag of the table is true. If the flag is false then no split
     * will be done.
     * @return true if table region split enabled
     */
    @Override
    public boolean isSplitEnabled() {
      return getOrDefault(SPLIT_ENABLED_KEY, Boolean::valueOf, DEFAULT_SPLIT_ENABLED);
    }

    /**
     * Setting the table region split enable flag.
     * @param isEnable True if enable region split.
     * @return the modifyable TD
     */
    public ModifyableTableDescriptor setSplitEnabled(final boolean isEnable) {
      return setValue(SPLIT_ENABLED_KEY, Boolean.toString(isEnable));
    }

    /**
     * Check if the region merge enable flag of the table is true. If the flag is false then no
     * merge will be done.
     * @return true if table region merge enabled
     */
    @Override
    public boolean isMergeEnabled() {
      return getOrDefault(MERGE_ENABLED_KEY, Boolean::valueOf, DEFAULT_MERGE_ENABLED);
    }

    /**
     * Setting the table region merge enable flag.
     * @param isEnable True if enable region merge.
     * @return the modifyable TD
     */
    public ModifyableTableDescriptor setMergeEnabled(final boolean isEnable) {
      return setValue(MERGE_ENABLED_KEY, Boolean.toString(isEnable));
    }

    /**
     * Check if the normalization enable flag of the table is true. If the flag is false then the
     * region normalizer won't attempt to normalize this table.
     * @return true if region normalization is enabled for this table
     **/
    @Override
    public boolean isNormalizationEnabled() {
      return getOrDefault(NORMALIZATION_ENABLED_KEY, Boolean::valueOf, false);
    }

    /**
     * Check if there is a target region count. If so, the normalize plan will be calculated based
     * on the target region count.
     * @return target region count after normalize done
     */
    @Override
    public int getNormalizerTargetRegionCount() {
      return getOrDefault(NORMALIZER_TARGET_REGION_COUNT_KEY, Integer::valueOf,
        Integer.valueOf(-1));
    }

    /**
     * Check if there is a target region size. If so, the normalize plan will be calculated based
     * on the target region size.
     * @return target region size after normalize done
     */
    @Override
    public long getNormalizerTargetRegionSize() {
      long targetRegionSize =
        getOrDefault(NORMALIZER_TARGET_REGION_SIZE_MB_KEY, Long::valueOf, Long.valueOf(-1));
      // Fall back to the deprecated key kept for backward compatibility (HBASE-25651).
      return targetRegionSize == -1
        ? getOrDefault(NORMALIZER_TARGET_REGION_SIZE_KEY, Long::valueOf, Long.valueOf(-1))
        : targetRegionSize;
    }

    /**
     * Setting the table normalization enable flag.
     * @param isEnable True if enable normalization.
     * @return the modifyable TD
     */
    public ModifyableTableDescriptor setNormalizationEnabled(final boolean isEnable) {
      return setValue(NORMALIZATION_ENABLED_KEY, Boolean.toString(isEnable));
    }

    /**
     * Setting the target region count of table normalization.
     * @param regionCount the target region count.
     * @return the modifyable TD
     */
    public ModifyableTableDescriptor setNormalizerTargetRegionCount(final int regionCount) {
      return setValue(NORMALIZER_TARGET_REGION_COUNT_KEY, Integer.toString(regionCount));
    }

    /**
     * Setting the target region size of table normalization.
     * @param regionSize the target region size.
     * @return the modifyable TD
     */
    public ModifyableTableDescriptor setNormalizerTargetRegionSize(final long regionSize) {
      return setValue(NORMALIZER_TARGET_REGION_SIZE_MB_KEY, Long.toString(regionSize));
    }

    /**
     * Sets the {@link Durability} setting for the table. This defaults to Durability.USE_DEFAULT.
     * @param durability enum value
     * @return the modifyable TD
     */
    public ModifyableTableDescriptor setDurability(Durability durability) {
      return setValue(DURABILITY_KEY, durability.name());
    }

    /**
     * Returns the durability setting for the table.
     * @return durability setting for the table.
     */
    @Override
    public Durability getDurability() {
      return getOrDefault(DURABILITY_KEY, Durability::valueOf, DEFAULT_DURABILITY);
    }

    /**
     * Get the name of the table
     */
    @Override
    public TableName getTableName() {
      return name;
    }
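
    // Sketch: Durability.USE_DEFAULT defers to the cluster-wide WAL setting; a
    // table can trade a small durability window for write latency, e.g.:
    //
    //   desc.setDurability(Durability.ASYNC_WAL);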

    /**
     * This sets the class associated with the region split policy which determines when a region
     * split should occur. The class used by default is defined in
     * org.apache.hadoop.hbase.regionserver.RegionSplitPolicy
     * @param clazz the class name
     * @return the modifyable TD
     */
    public ModifyableTableDescriptor setRegionSplitPolicyClassName(String clazz) {
      return setValue(SPLIT_POLICY_KEY, clazz);
    }

    /**
     * This gets the class associated with the region split policy which determines when a region
     * split should occur. The class used by default is defined in
     * org.apache.hadoop.hbase.regionserver.RegionSplitPolicy
     * @return the class name of the region split policy for this table. If this returns null, the
     *         default split policy is used.
     */
    @Override
    public String getRegionSplitPolicyClassName() {
      return getOrDefault(SPLIT_POLICY_KEY, Function.identity(), null);
    }

    /**
     * Returns the maximum size up to which a region can grow, after which a region split is
     * triggered. The region size is represented by the size of the biggest store file in that
     * region.
     * @return max hregion size for table, -1 if not set.
     * @see #setMaxFileSize(long)
     */
    @Override
    public long getMaxFileSize() {
      return getOrDefault(MAX_FILESIZE_KEY, Long::valueOf, (long) -1);
    }

    /**
     * Sets the maximum size up to which a region can grow, after which a region split is
     * triggered. The region size is represented by the size of the biggest store file in that
     * region, i.e. if the biggest store file grows beyond the maxFileSize, then the region split
     * is triggered. This defaults to a value of 256 MB.
     * <p>
     * This is not an absolute value and might vary. Assume that a single row exceeds the
     * maxFileSize then the storeFileSize will be greater than maxFileSize since a single row
     * cannot be split across multiple regions
     * </p>
     * @param maxFileSize The maximum file size that a store file can grow to before a split is
     *                    triggered.
     * @return the modifyable TD
     */
    public ModifyableTableDescriptor setMaxFileSize(long maxFileSize) {
      return setValue(MAX_FILESIZE_KEY, Long.toString(maxFileSize));
    }

    public ModifyableTableDescriptor setMaxFileSize(String maxFileSize) throws HBaseException {
      return setMaxFileSize(
        Long.parseLong(PrettyPrinter.valueOf(maxFileSize, PrettyPrinter.Unit.BYTE)));
    }

    /**
     * Returns the size of the memstore after which a flush to filesystem is triggered.
     * @return memory cache flush size for each hregion, -1 if not set.
     * @see #setMemStoreFlushSize(long)
     */
    @Override
    public long getMemStoreFlushSize() {
      return getOrDefault(MEMSTORE_FLUSHSIZE_KEY, Long::valueOf, (long) -1);
    }

    /**
     * Represents the maximum size of the memstore after which the contents of the memstore are
     * flushed to the filesystem. This defaults to a size of 64 MB.
     * @param memstoreFlushSize memory cache flush size for each hregion
     * @return the modifyable TD
     */
    public ModifyableTableDescriptor setMemStoreFlushSize(long memstoreFlushSize) {
      return setValue(MEMSTORE_FLUSHSIZE_KEY, Long.toString(memstoreFlushSize));
    }

    public ModifyableTableDescriptor setMemStoreFlushSize(String memStoreFlushSize)
      throws HBaseException {
      return setMemStoreFlushSize(
        Long.parseLong(PrettyPrinter.valueOf(memStoreFlushSize, PrettyPrinter.Unit.BYTE)));
    }

    /**
     * This sets the class associated with the flush policy which determines the stores that need
     * to be flushed when flushing a region. The class used by default is defined in
     * org.apache.hadoop.hbase.regionserver.FlushPolicy.
     * @param clazz the class name
     * @return the modifyable TD
     */
    public ModifyableTableDescriptor setFlushPolicyClassName(String clazz) {
      return setValue(FLUSH_POLICY_KEY, clazz);
    }

    /**
     * This gets the class associated with the flush policy which determines the stores that need
     * to be flushed when flushing a region. The class used by default is defined in
     * org.apache.hadoop.hbase.regionserver.FlushPolicy.
     * @return the class name of the flush policy for this table. If this returns null, the
     *         default flush policy is used.
     */
    @Override
    public String getFlushPolicyClassName() {
      return getOrDefault(FLUSH_POLICY_KEY, Function.identity(), null);
    }

    /**
     * Adds a column family. For the updating purpose please use
     * {@link #modifyColumnFamily(ColumnFamilyDescriptor)} instead.
     * @param family to add.
     * @return the modifyable TD
     */
    public ModifyableTableDescriptor setColumnFamily(final ColumnFamilyDescriptor family) {
      if (family.getName() == null || family.getName().length <= 0) {
        throw new IllegalArgumentException("Family name cannot be null or empty");
      }
      // The name is known to be non-null here, so its length can be checked directly.
      int flength = family.getName().length;
      if (flength > Byte.MAX_VALUE) {
        throw new IllegalArgumentException(
          "The length of family name is bigger than " + Byte.MAX_VALUE);
      }
      if (hasColumnFamily(family.getName())) {
        throw new IllegalArgumentException(
          "Family '" + family.getNameAsString() + "' already exists so cannot be added");
      }
      return putColumnFamily(family);
    }

    /**
     * Modifies the existing column family.
     * @param family to update
     * @return this (for chained invocation)
     */
    public ModifyableTableDescriptor modifyColumnFamily(final ColumnFamilyDescriptor family) {
      if (family.getName() == null || family.getName().length <= 0) {
        throw new IllegalArgumentException("Family name cannot be null or empty");
      }
      if (!hasColumnFamily(family.getName())) {
        throw new IllegalArgumentException(
          "Column family '" + family.getNameAsString() + "' does not exist");
      }
      return putColumnFamily(family);
    }

    private ModifyableTableDescriptor putColumnFamily(ColumnFamilyDescriptor family) {
      families.put(family.getName(), family);
      return this;
    }
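
    // Sketch of the add-vs-modify contract (cf is an illustrative descriptor):
    //
    //   desc.setColumnFamily(cf);    // throws if cf's name already exists
    //   desc.modifyColumnFamily(cf); // throws if cf's name does not exist yet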

    /**
     * Checks to see if this table contains the given column family
     * @param familyName Family name or column name.
     * @return true if the table contains the specified family name
     */
    @Override
    public boolean hasColumnFamily(final byte[] familyName) {
      return families.containsKey(familyName);
    }

    /** Returns Name of this table and then a map of all of the column family descriptors. */
    @Override
    public String toString() {
      StringBuilder s = new StringBuilder();
      s.append('\'').append(Bytes.toString(name.getName())).append('\'');
      s.append(getValues(true));
      families.values().forEach(f -> s.append(", ").append(f));
      return s.toString();
    }

    /**
     * @return Name of this table and then a map of all of the column family descriptors (with
     *         only the non-default column family attributes)
     */
    @Override
    public String toStringCustomizedValues() {
      StringBuilder s = new StringBuilder();
      s.append('\'').append(Bytes.toString(name.getName())).append('\'');
      s.append(getValues(false));
      families.values().forEach(hcd -> s.append(", ").append(hcd.toStringCustomizedValues()));
      return s.toString();
    }

    /** Returns map of all table attributes formatted into string. */
    public String toStringTableAttributes() {
      return getValues(true).toString();
    }
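
    // Illustrative output shape produced by toString/getValues (values made up):
    //
    //   'tableName', {TABLE_ATTRIBUTES => {MAX_FILESIZE => '134217728',
    //     METADATA => {'someUserKey' => 'someValue'}}}, {NAME => 'cf', ...}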

    private StringBuilder getValues(boolean printDefaults) {
      StringBuilder s = new StringBuilder();

      // step 1: set partitioning and pruning
      Set<Bytes> reservedKeys = new TreeSet<>();
      Set<Bytes> userKeys = new TreeSet<>();
      for (Map.Entry<Bytes, Bytes> entry : values.entrySet()) {
        if (entry.getKey() == null || entry.getKey().get() == null) {
          continue;
        }
        String key = Bytes.toString(entry.getKey().get());
        // in this section, print out reserved keywords + coprocessor info
        if (!RESERVED_KEYWORDS.contains(entry.getKey()) && !key.startsWith("coprocessor$")) {
          userKeys.add(entry.getKey());
          continue;
        }
        // only print out IS_META if true
        String value = Bytes.toString(entry.getValue().get());
        if (key.equalsIgnoreCase(IS_META)) {
          if (!Boolean.parseBoolean(value)) {
            continue;
          }
        }
        // skip a reserved key that still has its default value, unless printDefaults is set
        if (
          printDefaults || !DEFAULT_VALUES.containsKey(key)
            || !DEFAULT_VALUES.get(key).equalsIgnoreCase(value)
        ) {
          reservedKeys.add(entry.getKey());
        }
      }

      // early exit optimization
      boolean hasAttributes = !reservedKeys.isEmpty() || !userKeys.isEmpty();
      if (!hasAttributes) {
        return s;
      }

      s.append(", {");
      // step 2: printing attributes
      if (hasAttributes) {
        s.append("TABLE_ATTRIBUTES => {");

        // print all reserved keys first
        boolean printCommaForAttr = false;
        for (Bytes k : reservedKeys) {
          String key = Bytes.toString(k.get());
          String value = Bytes.toStringBinary(values.get(k).get());
          if (printCommaForAttr) {
            s.append(", ");
          }
          printCommaForAttr = true;
          s.append(key);
          s.append(" => ");
          s.append('\'').append(PrettyPrinter.format(value, getUnit(key))).append('\'');
        }

        if (!userKeys.isEmpty()) {
          // print all non-reserved as a separate subset
          if (printCommaForAttr) {
            s.append(", ");
          }
          s.append(HConstants.METADATA).append(" => ");
          s.append("{");
          boolean printCommaForCfg = false;
          for (Bytes k : userKeys) {
            String key = Bytes.toString(k.get());
            String value = Bytes.toStringBinary(values.get(k).get());
            if (printCommaForCfg) {
              s.append(", ");
            }
            printCommaForCfg = true;
            s.append('\'').append(key).append('\'');
            s.append(" => ");
            s.append('\'').append(PrettyPrinter.format(value, getUnit(key))).append('\'');
          }
          s.append("}");
        }

        s.append("}");
      }

      s.append("}"); // end of the attributes map
      return s;
    }

    /**
     * Compare the contents of the descriptor with another one passed as a parameter. Checks if
     * the obj passed is an instance of ModifyableTableDescriptor, if yes then the contents of the
     * descriptors are compared.
     * @param obj The object to compare
     * @return true if the contents of the two descriptors exactly match
     * @see java.lang.Object#equals(java.lang.Object)
     */
    @Override
    public boolean equals(Object obj) {
      if (this == obj) {
        return true;
      }
      if (obj instanceof ModifyableTableDescriptor) {
        return TableDescriptor.COMPARATOR.compare(this, (ModifyableTableDescriptor) obj) == 0;
      }
      return false;
    }

    /** Returns hash code */
    @Override
    public int hashCode() {
      int result = this.name.hashCode();
      if (this.families.size() > 0) {
        for (ColumnFamilyDescriptor e : this.families.values()) {
          result ^= e.hashCode();
        }
      }
      result ^= values.hashCode();
      return result;
    }

    // Comparable
    /**
     * Compares the descriptor with another descriptor which is passed as a parameter. This
     * compares the content of the two descriptors and not the reference.
     * @param other The MTD to compare
     * @return 0 if the contents of the descriptors match exactly; a non-zero value otherwise
     */
    @Override
    public int compareTo(final ModifyableTableDescriptor other) {
      return TableDescriptor.COMPARATOR.compare(this, other);
    }

    @Override
    public ColumnFamilyDescriptor[] getColumnFamilies() {
      return families.values().toArray(new ColumnFamilyDescriptor[families.size()]);
    }

    /**
     * Returns the configured replicas per region
     */
    @Override
    public int getRegionReplication() {
      return getOrDefault(REGION_REPLICATION_KEY, Integer::valueOf, DEFAULT_REGION_REPLICATION);
    }

    /**
     * Sets the number of replicas per region.
     * @param regionReplication the replication factor per region
     * @return the modifyable TD
     */
    public ModifyableTableDescriptor setRegionReplication(int regionReplication) {
      return setValue(REGION_REPLICATION_KEY, Integer.toString(regionReplication));
    }

    /** Returns true if the read-replicas memstore replication is enabled. */
    @Override
    public boolean hasRegionMemStoreReplication() {
      return getOrDefault(REGION_MEMSTORE_REPLICATION_KEY, Boolean::valueOf,
        DEFAULT_REGION_MEMSTORE_REPLICATION);
    }

    /**
     * Enable or Disable the memstore replication from the primary region to the replicas. The
     * replication will be used only for meta operations (e.g. flush, compaction, ...)
     * @param memstoreReplication true if the new data written to the primary region should be
     *                            replicated. false if the secondaries can tolerate having new
     *                            data only when the primary flushes the memstore.
     * @return the modifyable TD
     */
    public ModifyableTableDescriptor setRegionMemStoreReplication(boolean memstoreReplication) {
      return setValue(REGION_MEMSTORE_REPLICATION_KEY, Boolean.toString(memstoreReplication));
    }
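
    // Sketch: three replicas per region means one primary plus two read replicas;
    // reads served from replicas use TIMELINE consistency (see the
    // REGION_MEMSTORE_REPLICATION javadoc above):
    //
    //   desc.setRegionReplication(3);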

    public ModifyableTableDescriptor setPriority(int priority) {
      return setValue(PRIORITY_KEY, Integer.toString(priority));
    }

    @Override
    public int getPriority() {
      return getOrDefault(PRIORITY_KEY, Integer::valueOf, DEFAULT_PRIORITY);
    }

    /**
     * Returns all the column family names of the current table. The map of TableDescriptor
     * contains a mapping of family name to ColumnFamilyDescriptor. This returns all the keys of
     * the family map which represents the column family names of the table.
     * @return Immutable sorted set of the keys of the families.
     */
    @Override
    public Set<byte[]> getColumnFamilyNames() {
      return Collections.unmodifiableSet(this.families.keySet());
    }

    /**
     * Returns the ColumnFamilyDescriptor for a specific column family with name as specified by
     * the parameter column.
     * @param column Column family name
     * @return Column descriptor for the passed family name or the family on passed in column.
     */
    @Override
    public ColumnFamilyDescriptor getColumnFamily(final byte[] column) {
      return this.families.get(column);
    }

    /**
     * Removes the ColumnFamilyDescriptor with name specified by the parameter column from the
     * table descriptor
     * @param column Name of the column family to be removed.
     * @return Column descriptor for the passed family name or the family on passed in column.
     */
    public ColumnFamilyDescriptor removeColumnFamily(final byte[] column) {
      return this.families.remove(column);
    }

    /**
     * Add a table coprocessor to this table. The coprocessor type must be
     * org.apache.hadoop.hbase.coprocessor.RegionObserver or Endpoint. It won't check if the class
     * can be loaded or not. Whether a coprocessor is loadable or not will be determined when a
     * region is opened.
     * @param className Full class name.
     * @return the modifyable TD
     */
    public ModifyableTableDescriptor setCoprocessor(String className) throws IOException {
      return setCoprocessor(CoprocessorDescriptorBuilder.newBuilder(className)
        .setPriority(Coprocessor.PRIORITY_USER).build());
    }

    /**
     * Add a table coprocessor to this table. The coprocessor type must be
     * org.apache.hadoop.hbase.coprocessor.RegionObserver or Endpoint. It won't check if the class
     * can be loaded or not. Whether a coprocessor is loadable or not will be determined when a
     * region is opened.
     * @throws IOException any illegal parameter key/value
     * @return the modifyable TD
     */
    public ModifyableTableDescriptor setCoprocessor(CoprocessorDescriptor cp) throws IOException {
      checkHasCoprocessor(cp.getClassName());
      if (cp.getPriority() < 0) {
        throw new IOException(
          "Priority must be greater than or equal to zero, current:" + cp.getPriority());
      }
      // Validate parameter kvs and then add key/values to kvString.
      StringBuilder kvString = new StringBuilder();
      for (Map.Entry<String, String> e : cp.getProperties().entrySet()) {
        if (!e.getKey().matches(CP_HTD_ATTR_VALUE_PARAM_KEY_PATTERN)) {
          throw new IOException("Illegal parameter key = " + e.getKey());
        }
        if (!e.getValue().matches(CP_HTD_ATTR_VALUE_PARAM_VALUE_PATTERN)) {
          throw new IOException("Illegal parameter (" + e.getKey() + ") value = " + e.getValue());
        }
        if (kvString.length() != 0) {
          kvString.append(',');
        }
        kvString.append(e.getKey());
        kvString.append('=');
        kvString.append(e.getValue());
      }

      String value = cp.getJarPath().orElse("") + "|" + cp.getClassName() + "|"
        + Integer.toString(cp.getPriority()) + "|" + kvString.toString();
      return setCoprocessorToMap(value);
    }
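
    // Usage sketch (all names illustrative); the encoded attribute value for this
    // call would be "hdfs:///foo.jar|com.foo.FooRegionObserver|1001|arg1=1":
    //
    //   desc.setCoprocessor(CoprocessorDescriptorBuilder
    //     .newBuilder("com.foo.FooRegionObserver")
    //     .setJarPath("hdfs:///foo.jar")
    //     .setPriority(1001)
    //     .setProperties(Collections.singletonMap("arg1", "1"))
    //     .build());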

    /**
     * Add a table coprocessor to this table. The coprocessor type must be
     * org.apache.hadoop.hbase.coprocessor.RegionObserver or Endpoint. It won't check if the class
     * can be loaded or not. Whether a coprocessor is loadable or not will be determined when a
     * region is opened.
     * @param specStr The Coprocessor specification all in one String
     * @return the modifyable TD
     * @deprecated used by HTableDescriptor and admin.rb. As of release 2.0.0, this will be
     *             removed in HBase 3.0.0.
     */
    @Deprecated
    public ModifyableTableDescriptor setCoprocessorWithSpec(final String specStr)
      throws IOException {
      CoprocessorDescriptor cpDesc =
        toCoprocessorDescriptor(specStr).orElseThrow(() -> new IllegalArgumentException(
          "Format does not match " + CP_HTD_ATTR_VALUE_PATTERN + ": " + specStr));
      checkHasCoprocessor(cpDesc.getClassName());
      return setCoprocessorToMap(specStr);
    }

    private void checkHasCoprocessor(final String className) throws IOException {
      if (hasCoprocessor(className)) {
        throw new IOException("Coprocessor " + className + " already exists.");
      }
    }

    /**
     * Add coprocessor to values Map
     * @param specStr The Coprocessor specification all in one String
     * @return Returns <code>this</code>
     */
    private ModifyableTableDescriptor setCoprocessorToMap(final String specStr) {
      if (specStr == null) {
        return this;
      }
      // generate a coprocessor key
      int maxCoprocessorNumber = 0;
      Matcher keyMatcher;
      for (Map.Entry<Bytes, Bytes> e : this.values.entrySet()) {
        keyMatcher = CP_HTD_ATTR_KEY_PATTERN.matcher(Bytes.toString(e.getKey().get()));
        if (!keyMatcher.matches()) {
          continue;
        }
        maxCoprocessorNumber =
          Math.max(Integer.parseInt(keyMatcher.group(1)), maxCoprocessorNumber);
      }
      maxCoprocessorNumber++;
      String key = "coprocessor$" + Integer.toString(maxCoprocessorNumber);
      return setValue(new Bytes(Bytes.toBytes(key)), new Bytes(Bytes.toBytes(specStr)));
    }

    /**
     * Check if the table has an attached co-processor represented by the name className
     * @param classNameToMatch - Class name of the co-processor
     * @return true if the table has a co-processor className
     */
    @Override
    public boolean hasCoprocessor(String classNameToMatch) {
      return getCoprocessorDescriptors().stream()
        .anyMatch(cp -> cp.getClassName().equals(classNameToMatch));
    }

    /**
     * Return the list of attached co-processors represented by their name className
     * @return The list of co-processor classNames
     */
    @Override
    public List<CoprocessorDescriptor> getCoprocessorDescriptors() {
      List<CoprocessorDescriptor> result = new ArrayList<>();
      for (Map.Entry<Bytes, Bytes> e : getValues().entrySet()) {
        String key = Bytes.toString(e.getKey().get()).trim();
        if (CP_HTD_ATTR_KEY_PATTERN.matcher(key).matches()) {
          toCoprocessorDescriptor(Bytes.toString(e.getValue().get()).trim())
            .ifPresent(result::add);
        }
      }
      return result;
    }
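
    // Sketch: generated keys count up as "coprocessor$1", "coprocessor$2", ...;
    // the next key is always max(existing number) + 1, so removing an entry with
    // a lower number does not cause that number to be reused.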

    /**
     * Remove a coprocessor from those set on the table
     * @param className Class name of the co-processor
     */
    public void removeCoprocessor(String className) {
      Bytes match = null;
      Matcher keyMatcher;
      Matcher valueMatcher;
      for (Map.Entry<Bytes, Bytes> e : this.values.entrySet()) {
        keyMatcher = CP_HTD_ATTR_KEY_PATTERN.matcher(Bytes.toString(e.getKey().get()));
        if (!keyMatcher.matches()) {
          continue;
        }
        valueMatcher = CP_HTD_ATTR_VALUE_PATTERN.matcher(Bytes.toString(e.getValue().get()));
        if (!valueMatcher.matches()) {
          continue;
        }
        // get className and compare
        String clazz = valueMatcher.group(2).trim(); // classname is the 2nd field
        // remove the CP if it is present
        if (clazz.equals(className.trim())) {
          match = e.getKey();
          break;
        }
      }
      // if we found a match, remove it
      if (match != null) {
        ModifyableTableDescriptor.this.removeValue(match);
      } else {
        LOG.warn("coprocessor with class name {} was not found in the table attribute",
          className);
      }
    }

    /**
     * Set the table owner
     * @deprecated since 2.0.0 and will be removed in 3.0.0.
     * @see <a href="https://issues.apache.org/jira/browse/HBASE-15583">HBASE-15583</a>
     */
    @Deprecated
    public ModifyableTableDescriptor setOwner(User owner) {
      return setOwnerString(owner != null ? owner.getShortName() : null);
    }

    /**
     * Set the table owner.
     * @deprecated since 2.0.0 and will be removed in 3.0.0.
     * @see <a href="https://issues.apache.org/jira/browse/HBASE-15583">HBASE-15583</a>
     */
    // used by admin.rb:alter(table_name,*args) to update owner.
    @Deprecated
    public ModifyableTableDescriptor setOwnerString(String ownerString) {
      return setValue(OWNER_KEY, ownerString);
    }

    /**
     * Get the table owner.
     * @deprecated since 2.0.0 and will be removed in 3.0.0.
     * @see <a href="https://issues.apache.org/jira/browse/HBASE-15583">HBASE-15583</a>
     */
    @Override
    @Deprecated
    public String getOwnerString() {
      // Note that every table should have an owner (i.e. should have OWNER_KEY set).
      // hbase:meta should return the system user as owner, not null (see
      // MasterFileSystem.java:bootstrap()).
      return getOrDefault(OWNER_KEY, Function.identity(), null);
    }

    /** Returns the bytes in pb format */
    private byte[] toByteArray() {
      return ProtobufUtil.prependPBMagic(ProtobufUtil.toTableSchema(this).toByteArray());
    }

    /**
     * Parse the serialized representation of a {@link ModifyableTableDescriptor}
     * @param bytes A pb serialized {@link ModifyableTableDescriptor} instance with pb magic
     *              prefix
     * @return An instance of {@link ModifyableTableDescriptor} made from <code>bytes</code>
     * @see #toByteArray()
     */
    private static TableDescriptor parseFrom(final byte[] bytes) throws DeserializationException {
      if (!ProtobufUtil.isPBMagicPrefix(bytes)) {
        throw new DeserializationException("Expected PB encoded ModifyableTableDescriptor");
      }
      int pblen = ProtobufUtil.lengthOfPBMagic();
      HBaseProtos.TableSchema.Builder builder = HBaseProtos.TableSchema.newBuilder();
      try {
        ProtobufUtil.mergeFrom(builder, bytes, pblen, bytes.length - pblen);
        return ProtobufUtil.toTableDescriptor(builder.build());
      } catch (IOException e) {
        throw new DeserializationException(e);
      }
    }

    @Override
    public int getColumnFamilyCount() {
      return families.size();
    }

    @Override
    public Optional<String> getRegionServerGroup() {
      Bytes value = values.get(RSGROUP_KEY);
      if (value != null) {
        return Optional.of(Bytes.toString(value.get(), value.getOffset(), value.getLength()));
      } else {
        return Optional.empty();
      }
    }
  }
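
  // Parse sketch for the helper below (spec string from the pattern javadoc):
  //
  //   toCoprocessorDescriptor("hdfs:///foo.jar|com.foo.FooRegionObserver|1001|arg1=1,arg2=2")
  //
  // yields jarPath "hdfs:///foo.jar", className "com.foo.FooRegionObserver",
  // priority 1001 and properties {arg1=1, arg2=2}.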

  private static Optional<CoprocessorDescriptor> toCoprocessorDescriptor(String spec) {
    Matcher matcher = CP_HTD_ATTR_VALUE_PATTERN.matcher(spec);
    if (matcher.matches()) {
      // jar file path can be empty if the cp class can be loaded
      // from class loader.
      String path = matcher.group(1).trim().isEmpty() ? null : matcher.group(1).trim();
      String className = matcher.group(2).trim();
      if (className.isEmpty()) {
        return Optional.empty();
      }
      String priorityStr = matcher.group(3).trim();
      int priority =
        priorityStr.isEmpty() ? Coprocessor.PRIORITY_USER : Integer.parseInt(priorityStr);
      String cfgSpec = null;
      try {
        cfgSpec = matcher.group(4);
      } catch (IndexOutOfBoundsException ex) {
        // ignore
      }
      Map<String, String> ourConf = new TreeMap<>();
      if (cfgSpec != null && !cfgSpec.trim().equals("|")) {
        cfgSpec = cfgSpec.substring(cfgSpec.indexOf('|') + 1);
        Matcher m = CP_HTD_ATTR_VALUE_PARAM_PATTERN.matcher(cfgSpec);
        while (m.find()) {
          ourConf.put(m.group(1), m.group(2));
        }
      }
      return Optional.of(CoprocessorDescriptorBuilder.newBuilder(className).setJarPath(path)
        .setPriority(priority).setProperties(ourConf).build());
    }
    return Optional.empty();
  }
}