/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.io.hfile;

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInput;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;
import java.nio.ByteBuffer;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.hbase.CellComparator;
import org.apache.hadoop.hbase.CellComparatorImpl;
import org.apache.hadoop.hbase.InnerStoreCellComparator;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.MetaCellComparator;
import org.apache.hadoop.hbase.io.compress.Compression;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.yetus.audience.InterfaceAudience;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations;

import org.apache.hadoop.hbase.shaded.protobuf.generated.HFileProtos;

/**
 * The {@link HFile} has a fixed trailer which contains offsets to other variable parts of the file.
 * It also includes basic metadata on the file. The trailer size is fixed within a given
 * {@link HFile} format version only, but we always store the version number as the last four-byte
 * integer of the file. The version number itself is split into two portions, a major version and a
 * minor version. The major version occupies the last three bytes of the file and the minor version
 * the single preceding byte. The major version determines which readers/writers to use to
 * read/write an hfile, while the minor version covers smaller changes in hfile format that do not
 * need a new reader/writer type.
 */
@InterfaceAudience.Private
public class FixedFileTrailer {
  private static final Logger LOG = LoggerFactory.getLogger(FixedFileTrailer.class);
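
  // For illustration, the version packing described in the class comment, using the
  // materializeVersion/extractMajorVersion/extractMinorVersion helpers defined at the bottom of
  // this class (major=3, minor=5 are example values):
  //   materializeVersion(3, 5)        == (3 & 0x00ffffff) | (5 << 24) == 0x05000003
  //   extractMajorVersion(0x05000003) == 0x05000003 & 0x00ffffff      == 3
  //   extractMinorVersion(0x05000003) == 0x05000003 >>> 24            == 5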

  /**
   * We store the comparator class name as a fixed-length field in the trailer.
   */
  private static final int MAX_COMPARATOR_NAME_LENGTH = 128;

  /**
   * Offset to the fileinfo data, a small block of vitals. Necessary in v1 but only potentially
   * useful for pretty-printing in v2.
   */
  private long fileInfoOffset;

  /**
   * In version 1, the offset to the data block index. Starting from version 2, the meaning of this
   * field is the offset to the section of the file that should be loaded at the time the file is
   * being opened: i.e. on open we load the root index, file info, etc. See
   * http://hbase.apache.org/book.html#_hfile_format_2 in the reference guide.
   */
  private long loadOnOpenDataOffset;

  /**
   * The number of entries in the root data index.
   */
  private int dataIndexCount;

  /**
   * Total uncompressed size of all blocks of the data index.
   */
  private long uncompressedDataIndexSize;

  /**
   * The number of entries in the meta index.
   */
  private int metaIndexCount;

  /**
   * The total uncompressed size of keys/values stored in the file.
   */
  private long totalUncompressedBytes;

  /**
   * The number of key/value pairs in the file. This field was int in version 1, but is now long.
   */
  private long entryCount;

  /**
   * The compression codec used for all blocks.
   */
  private Compression.Algorithm compressionCodec = Compression.Algorithm.NONE;

  /**
   * The number of levels in the potentially multi-level data index. Used from version 2 onwards.
   */
  private int numDataIndexLevels;

  /**
   * The offset of the first data block.
   */
  private long firstDataBlockOffset;

  /**
   * It is guaranteed that no key/value data blocks start after this offset in the file.
   */
  private long lastDataBlockOffset;

  /**
   * Raw key comparator class name in version 3.
   */
  // We could write the actual class name from 2.0 onwards and handle BC
  private String comparatorClassName =
    InnerStoreCellComparator.INNER_STORE_COMPARATOR.getClass().getName();

  /**
   * The encryption key.
   */
  private byte[] encryptionKey;

  /**
   * The {@link HFile} format major version.
   */
  private final int majorVersion;

  /**
   * The {@link HFile} format minor version.
   */
  private final int minorVersion;

  FixedFileTrailer(int majorVersion, int minorVersion) {
    this.majorVersion = majorVersion;
    this.minorVersion = minorVersion;
    HFile.checkFormatVersion(majorVersion);
  }

  private static int[] computeTrailerSizeByVersion() {
    int[] versionToSize = new int[HFile.MAX_FORMAT_VERSION + 1];
    // We support only two major versions now, i.e. V2 and V3.
    versionToSize[2] = 212;
    for (int version = 3; version <= HFile.MAX_FORMAT_VERSION; version++) {
      // The max trailer size for V3 and above is fixed at 4 KB to leave room for future
      // enhancements, if any. This holds as long as the trailer size never exceeds 4 KB.
      versionToSize[version] = 1024 * 4;
    }
    return versionToSize;
  }

  private static int getMaxTrailerSize() {
    int maxSize = 0;
    for (int version = HFile.MIN_FORMAT_VERSION; version <= HFile.MAX_FORMAT_VERSION; ++version) {
      maxSize = Math.max(getTrailerSize(version), maxSize);
    }
    return maxSize;
  }

  private static final int[] TRAILER_SIZE = computeTrailerSizeByVersion();
  private static final int MAX_TRAILER_SIZE = getMaxTrailerSize();

  private static final int NOT_PB_SIZE = BlockType.MAGIC_LENGTH + Bytes.SIZEOF_INT;

  static int getTrailerSize(int version) {
    return TRAILER_SIZE[version];
  }

  public int getTrailerSize() {
    return getTrailerSize(majorVersion);
  }

  /**
   * Write the trailer to a data stream. We support writing version 1 for testing and for
   * determining version 1 trailer size. It is also easy to see what fields changed in version 2.
   */
  void serialize(DataOutputStream outputStream) throws IOException {
    HFile.checkFormatVersion(majorVersion);

    ByteArrayOutputStream baos = new ByteArrayOutputStream();
    DataOutputStream baosDos = new DataOutputStream(baos);

    BlockType.TRAILER.write(baosDos);
    serializeAsPB(baosDos);

    // The last 4 bytes of the file encode the major and minor version universally
    baosDos.writeInt(materializeVersion(majorVersion, minorVersion));

    baos.writeTo(outputStream);
  }
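
  // For illustration, the on-disk layout that serialize(...) produces, assuming the fixed v3
  // trailer size of 4096 bytes from computeTrailerSizeByVersion():
  //   [ 8-byte TRAILER block magic ]
  //   [ length-delimited protobuf FileTrailerProto, zero-padded by serializeAsPB(...) ]
  //   [ 4-byte packed major/minor version int ]
  // NOT_PB_SIZE (8 + 4 = 12) is the share taken by the magic and the version int, so the protobuf
  // section plus its padding always occupies getTrailerSize() - 12 bytes.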

  HFileProtos.FileTrailerProto toProtobuf() {
    HFileProtos.FileTrailerProto.Builder builder = HFileProtos.FileTrailerProto.newBuilder()
      .setFileInfoOffset(fileInfoOffset).setLoadOnOpenDataOffset(loadOnOpenDataOffset)
      .setUncompressedDataIndexSize(uncompressedDataIndexSize)
      .setTotalUncompressedBytes(totalUncompressedBytes).setDataIndexCount(dataIndexCount)
      .setMetaIndexCount(metaIndexCount).setEntryCount(entryCount)
      .setNumDataIndexLevels(numDataIndexLevels).setFirstDataBlockOffset(firstDataBlockOffset)
      .setLastDataBlockOffset(lastDataBlockOffset)
      .setComparatorClassName(getHBase1CompatibleName(comparatorClassName))
      .setCompressionCodec(compressionCodec.ordinal());
    if (encryptionKey != null) {
      builder.setEncryptionKey(UnsafeByteOperations.unsafeWrap(encryptionKey));
    }
    return builder.build();
  }

  /**
   * Write trailer data as protobuf. NOTE: we run a translation on the comparator name and will
   * serialize the old hbase-1.x name where it makes sense. See
   * {@link #getHBase1CompatibleName(String)}.
   */
  void serializeAsPB(DataOutputStream output) throws IOException {
    ByteArrayOutputStream baos = new ByteArrayOutputStream();
    // We need this extra copy unfortunately to determine the final size of the
    // delimited output, see use of baos.size() below.
    toProtobuf().writeDelimitedTo(baos);
    baos.writeTo(output);
    // Pad to make up the difference between the variable PB encoding length and the
    // length when encoded as a writable under earlier V2 formats. Failing to pad
    // properly, or a PB encoding that is too big, would mean the trailer won't be read
    // in properly by HFile.
    int padding = getTrailerSize() - NOT_PB_SIZE - baos.size();
    if (padding < 0) {
      throw new IOException("Pbuf encoding size exceeded fixed trailer size limit");
    }
    for (int i = 0; i < padding; i++) {
      output.write(0);
    }
  }
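
  // Worked example of the padding arithmetic above (the 130-byte figure is hypothetical): if the
  // delimited protobuf happens to encode to 130 bytes in a v3 file, then
  //   padding = getTrailerSize() - NOT_PB_SIZE - baos.size() = 4096 - 12 - 130 = 3954
  // zero bytes are appended, keeping the trailer at its fixed size.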

  /**
   * Deserialize the fixed file trailer from the given stream. The version needs to already be
   * specified. Make sure this is consistent with {@link #serialize(DataOutputStream)}.
   */
  void deserialize(DataInputStream inputStream) throws IOException {
    HFile.checkFormatVersion(majorVersion);

    BlockType.TRAILER.readAndCheck(inputStream);

    if (
      majorVersion > 2
        || (majorVersion == 2 && minorVersion >= HFileReaderImpl.PBUF_TRAILER_MINOR_VERSION)
    ) {
      deserializeFromPB(inputStream);
    } else {
      deserializeFromWritable(inputStream);
    }

    // The last 4 bytes of the file encode the major and minor version universally
    int version = inputStream.readInt();
    expectMajorVersion(extractMajorVersion(version));
    expectMinorVersion(extractMinorVersion(version));
  }

  /**
   * Deserialize the file trailer as protobuf.
   */
  void deserializeFromPB(DataInputStream inputStream) throws IOException {
    // read PB and skip padding
    int start = inputStream.available();
    HFileProtos.FileTrailerProto trailerProto =
      HFileProtos.FileTrailerProto.parser().parseDelimitedFrom(inputStream);
    int size = start - inputStream.available();
    inputStream.skip(getTrailerSize() - NOT_PB_SIZE - size);

    // process the PB
    if (trailerProto.hasFileInfoOffset()) {
      fileInfoOffset = trailerProto.getFileInfoOffset();
    }
    if (trailerProto.hasLoadOnOpenDataOffset()) {
      loadOnOpenDataOffset = trailerProto.getLoadOnOpenDataOffset();
    }
    if (trailerProto.hasUncompressedDataIndexSize()) {
      uncompressedDataIndexSize = trailerProto.getUncompressedDataIndexSize();
    }
    if (trailerProto.hasTotalUncompressedBytes()) {
      totalUncompressedBytes = trailerProto.getTotalUncompressedBytes();
    }
    if (trailerProto.hasDataIndexCount()) {
      dataIndexCount = trailerProto.getDataIndexCount();
    }
    if (trailerProto.hasMetaIndexCount()) {
      metaIndexCount = trailerProto.getMetaIndexCount();
    }
    if (trailerProto.hasEntryCount()) {
      entryCount = trailerProto.getEntryCount();
    }
    if (trailerProto.hasNumDataIndexLevels()) {
      numDataIndexLevels = trailerProto.getNumDataIndexLevels();
    }
    if (trailerProto.hasFirstDataBlockOffset()) {
      firstDataBlockOffset = trailerProto.getFirstDataBlockOffset();
    }
    if (trailerProto.hasLastDataBlockOffset()) {
      lastDataBlockOffset = trailerProto.getLastDataBlockOffset();
    }
    if (trailerProto.hasComparatorClassName()) {
      setComparatorClass(getComparatorClass(trailerProto.getComparatorClassName()));
    }
    if (trailerProto.hasCompressionCodec()) {
      compressionCodec = Compression.Algorithm.values()[trailerProto.getCompressionCodec()];
    } else {
      compressionCodec = Compression.Algorithm.NONE;
    }
    if (trailerProto.hasEncryptionKey()) {
      encryptionKey = trailerProto.getEncryptionKey().toByteArray();
    }
  }
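
  // Note on the padding skip above: the protobuf parser consumes only the delimited message, so
  // the number of bytes actually read is measured as the drop in available() on the backing
  // ByteArrayInputStream, and the remaining getTrailerSize() - NOT_PB_SIZE - size padding bytes
  // are skipped, leaving the stream positioned at the trailing version int.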

  /**
   * Deserialize the file trailer as writable data.
   */
  void deserializeFromWritable(DataInput input) throws IOException {
    fileInfoOffset = input.readLong();
    loadOnOpenDataOffset = input.readLong();
    dataIndexCount = input.readInt();
    uncompressedDataIndexSize = input.readLong();
    metaIndexCount = input.readInt();

    totalUncompressedBytes = input.readLong();
    entryCount = input.readLong();
    compressionCodec = Compression.Algorithm.values()[input.readInt()];
    numDataIndexLevels = input.readInt();
    firstDataBlockOffset = input.readLong();
    lastDataBlockOffset = input.readLong();
    // TODO this is a classname encoded into an HFile's trailer. We are going to need to have
    // some compat code here.
    setComparatorClass(
      getComparatorClass(Bytes.readStringFixedSize(input, MAX_COMPARATOR_NAME_LENGTH)));
  }

  private void append(StringBuilder sb, String s) {
    if (sb.length() > 0) {
      sb.append(", ");
    }
    sb.append(s);
  }

  @Override
  public String toString() {
    StringBuilder sb = new StringBuilder();
    append(sb, "fileinfoOffset=" + fileInfoOffset);
    append(sb, "loadOnOpenDataOffset=" + loadOnOpenDataOffset);
    append(sb, "dataIndexCount=" + dataIndexCount);
    append(sb, "metaIndexCount=" + metaIndexCount);
    append(sb, "totalUncompressedBytes=" + totalUncompressedBytes);
    append(sb, "entryCount=" + entryCount);
    append(sb, "compressionCodec=" + compressionCodec);
    append(sb, "uncompressedDataIndexSize=" + uncompressedDataIndexSize);
    append(sb, "numDataIndexLevels=" + numDataIndexLevels);
    append(sb, "firstDataBlockOffset=" + firstDataBlockOffset);
    append(sb, "lastDataBlockOffset=" + lastDataBlockOffset);
    append(sb, "comparatorClassName=" + comparatorClassName);
    if (majorVersion >= 3) {
      append(sb, "encryptionKey=" + (encryptionKey != null ? "PRESENT" : "NONE"));
    }
    append(sb, "majorVersion=" + majorVersion);
    append(sb, "minorVersion=" + minorVersion);

    return sb.toString();
  }

  /**
   * Reads a file trailer from the given file.
   * @param istream  the input stream with the ability to seek. Does not have to be buffered, as
   *                 only one read operation is made.
   * @param fileSize the file size. Can be obtained using
   *                 {@link org.apache.hadoop.fs.FileSystem#getFileStatus(org.apache.hadoop.fs.Path)}.
   * @return the fixed file trailer read
   * @throws IOException if failed to read from the underlying stream, or the trailer is corrupted,
   *                     or the version of the trailer is unsupported
   */
  public static FixedFileTrailer readFromStream(FSDataInputStream istream, long fileSize)
    throws IOException {
    int bufferSize = MAX_TRAILER_SIZE;
    long seekPoint = fileSize - bufferSize;
    if (seekPoint < 0) {
      // It is hard to imagine such a small HFile.
      seekPoint = 0;
      bufferSize = (int) fileSize;
    }

    istream.seek(seekPoint);

    ByteBuffer buf = ByteBuffer.allocate(bufferSize);
    istream.readFully(buf.array(), buf.arrayOffset(), buf.arrayOffset() + buf.limit());

    // Read the version from the last int of the file.
    buf.position(buf.limit() - Bytes.SIZEOF_INT);
    int version = buf.getInt();

    // Extract the major and minor versions.
    int majorVersion = extractMajorVersion(version);
    int minorVersion = extractMinorVersion(version);

    HFile.checkFormatVersion(majorVersion); // throws IAE if invalid

    int trailerSize = getTrailerSize(majorVersion);

    FixedFileTrailer fft = new FixedFileTrailer(majorVersion, minorVersion);
    fft.deserialize(new DataInputStream(new ByteArrayInputStream(buf.array(),
      buf.arrayOffset() + bufferSize - trailerSize, trailerSize)));
    return fft;
  }
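
  // A usage sketch (hypothetical names, error handling elided), assuming a Path `path` on a
  // FileSystem `fs`:
  //   long fileSize = fs.getFileStatus(path).getLen();
  //   try (FSDataInputStream in = fs.open(path)) {
  //     FixedFileTrailer trailer = FixedFileTrailer.readFromStream(in, fileSize);
  //     LOG.info("Read trailer: {}", trailer);
  //   }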

  public void expectMajorVersion(int expected) {
    if (majorVersion != expected) {
      throw new IllegalArgumentException(
        "Invalid HFile major version: " + majorVersion + " (expected: " + expected + ")");
    }
  }

  public void expectMinorVersion(int expected) {
    if (minorVersion != expected) {
      throw new IllegalArgumentException(
        "Invalid HFile minor version: " + minorVersion + " (expected: " + expected + ")");
    }
  }

  public void expectAtLeastMajorVersion(int lowerBound) {
    if (majorVersion < lowerBound) {
      throw new IllegalArgumentException("Invalid HFile major version: " + majorVersion
        + " (expected: " + lowerBound + " or higher).");
    }
  }

  public long getFileInfoOffset() {
    return fileInfoOffset;
  }

  public void setFileInfoOffset(long fileInfoOffset) {
    this.fileInfoOffset = fileInfoOffset;
  }

  public long getLoadOnOpenDataOffset() {
    return loadOnOpenDataOffset;
  }

  public void setLoadOnOpenOffset(long loadOnOpenDataOffset) {
    this.loadOnOpenDataOffset = loadOnOpenDataOffset;
  }

  public int getDataIndexCount() {
    return dataIndexCount;
  }

  public void setDataIndexCount(int dataIndexCount) {
    this.dataIndexCount = dataIndexCount;
  }

  public int getMetaIndexCount() {
    return metaIndexCount;
  }

  public void setMetaIndexCount(int metaIndexCount) {
    this.metaIndexCount = metaIndexCount;
  }

  public long getTotalUncompressedBytes() {
    return totalUncompressedBytes;
  }

  public void setTotalUncompressedBytes(long totalUncompressedBytes) {
    this.totalUncompressedBytes = totalUncompressedBytes;
  }

  public long getEntryCount() {
    return entryCount;
  }

  public void setEntryCount(long newEntryCount) {
    entryCount = newEntryCount;
  }

  public Compression.Algorithm getCompressionCodec() {
    return compressionCodec;
  }

  public void setCompressionCodec(Compression.Algorithm compressionCodec) {
    this.compressionCodec = compressionCodec;
  }

  public int getNumDataIndexLevels() {
    expectAtLeastMajorVersion(2);
    return numDataIndexLevels;
  }

  public void setNumDataIndexLevels(int numDataIndexLevels) {
    expectAtLeastMajorVersion(2);
    this.numDataIndexLevels = numDataIndexLevels;
  }

  public long getLastDataBlockOffset() {
    expectAtLeastMajorVersion(2);
    return lastDataBlockOffset;
  }

  public void setLastDataBlockOffset(long lastDataBlockOffset) {
    expectAtLeastMajorVersion(2);
    this.lastDataBlockOffset = lastDataBlockOffset;
  }

  public long getFirstDataBlockOffset() {
    expectAtLeastMajorVersion(2);
    return firstDataBlockOffset;
  }

  public void setFirstDataBlockOffset(long firstDataBlockOffset) {
    expectAtLeastMajorVersion(2);
    this.firstDataBlockOffset = firstDataBlockOffset;
  }

  public String getComparatorClassName() {
    return comparatorClassName;
  }

  /**
   * Returns the major version of this HFile format.
   */
  public int getMajorVersion() {
    return majorVersion;
  }

  /**
   * Returns the minor version of this HFile format.
   */
  public int getMinorVersion() {
    return minorVersion;
  }

  public void setComparatorClass(Class<? extends CellComparator> klass) {
    // Is the comparator instantiable?
    try {
      // If null, it should be the Bytes.BYTES_RAWCOMPARATOR
      if (klass != null) {
        // The instance is discarded; we only verify that the class can be instantiated.
        CellComparator comp = klass.getDeclaredConstructor().newInstance();
        // If the name wasn't one of the legacy names, maybe it's a legitimate new kind of
        // comparator.
        this.comparatorClassName = klass.getName();
      }
    } catch (Exception e) {
      throw new RuntimeException("Comparator class " + klass.getName() + " is not instantiable", e);
    }
  }

  /**
   * If a 'standard' Comparator, write the old name for the Comparator when we serialize rather than
   * the new name; writing the new name would make newly-written hfiles unparseable by hbase-1.x, a
   * facility we'd like to preserve across rolling upgrades and for hbase-1.x clusters reading
   * hbase-2.x output.
   * <p>
   * The Comparators in hbase-2.x work the same as they did in hbase-1.x; they compare KeyValues. In
   * hbase-2.x they were renamed, making use of the more generic 'Cell' nomenclature to indicate
   * that we intend to move away from KeyValues post hbase-2. A naming change is not reason enough
   * to make it so hbase-1.x cannot read hbase-2.x files given the structure goes unchanged (hfile
   * v3). So, let's write the old names for Comparators into the hfile tails in hbase-2. Here is
   * where we do the translation. {@link #getComparatorClass(String)} does translation going the
   * other way.
   * <p>
   * The translation is done on the serialized Protobuf only.
   * </p>
   * @param comparator String class name of the Comparator used in this hfile.
   * @return What to store in the trailer as our comparator name.
   * @see #getComparatorClass(String)
   * @since hbase-2.0.0.
   * @deprecated Since hbase-2.0.0. Will be removed in hbase-3.0.0.
   */
  @Deprecated
  private String getHBase1CompatibleName(final String comparator) {
    if (
      comparator.equals(CellComparatorImpl.class.getName())
        || comparator.equals(InnerStoreCellComparator.class.getName())
    ) {
      return KeyValue.COMPARATOR.getClass().getName();
    }
    if (comparator.equals(MetaCellComparator.class.getName())) {
      return KeyValue.META_COMPARATOR.getClass().getName();
    }
    return comparator;
  }
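
  // For illustration, the translation performed above (the hbase-1.x names shown are what
  // KeyValue.COMPARATOR and KeyValue.META_COMPARATOR are expected to resolve to):
  //   o.a.h.hbase.CellComparatorImpl       -> o.a.h.hbase.KeyValue$KVComparator
  //   o.a.h.hbase.InnerStoreCellComparator -> o.a.h.hbase.KeyValue$KVComparator
  //   o.a.h.hbase.MetaCellComparator       -> o.a.h.hbase.KeyValue$MetaComparator
  // Any other comparator name is written through unchanged.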

  @SuppressWarnings("unchecked")
  private static Class<? extends CellComparator> getComparatorClass(String comparatorClassName)
    throws IOException {
    Class<? extends CellComparator> comparatorKlass;
    // for BC
    if (
      comparatorClassName.equals(KeyValue.COMPARATOR.getLegacyKeyComparatorName())
        || comparatorClassName.equals(KeyValue.COMPARATOR.getClass().getName())
        || (comparatorClassName.equals("org.apache.hadoop.hbase.CellComparator"))
    ) {
      comparatorKlass = InnerStoreCellComparator.class;
    } else if (
      comparatorClassName.equals(KeyValue.META_COMPARATOR.getLegacyKeyComparatorName())
        || comparatorClassName.equals(KeyValue.META_COMPARATOR.getClass().getName())
        || (comparatorClassName.equals("org.apache.hadoop.hbase.CellComparator$MetaCellComparator"))
        || (comparatorClassName
          .equals("org.apache.hadoop.hbase.CellComparatorImpl$MetaCellComparator"))
        || (comparatorClassName.equals("org.apache.hadoop.hbase.MetaCellComparator"))
    ) {
      comparatorKlass = MetaCellComparator.class;
    } else if (
      comparatorClassName.equals("org.apache.hadoop.hbase.KeyValue$RawBytesComparator")
        || comparatorClassName.equals("org.apache.hadoop.hbase.util.Bytes$ByteArrayComparator")
    ) {
      // When the comparator to be used is Bytes.BYTES_RAWCOMPARATOR, we just return null from
      // here because Bytes.BYTES_RAWCOMPARATOR is not a CellComparator.
      comparatorKlass = null;
    } else {
      // If the name wasn't one of the legacy names, maybe it's a legitimate new kind of
      // comparator.
      try {
        comparatorKlass = (Class<? extends CellComparator>) Class.forName(comparatorClassName);
      } catch (ClassNotFoundException e) {
        throw new IOException(e);
      }
    }
    return comparatorKlass;
  }

  static CellComparator createComparator(String comparatorClassName) throws IOException {
    if (
      comparatorClassName
        .equals(InnerStoreCellComparator.INNER_STORE_COMPARATOR.getClass().getName())
    ) {
      return InnerStoreCellComparator.INNER_STORE_COMPARATOR;
    } else
      if (comparatorClassName.equals(MetaCellComparator.META_COMPARATOR.getClass().getName())) {
        return MetaCellComparator.META_COMPARATOR;
      }
    try {
      Class<? extends CellComparator> comparatorClass = getComparatorClass(comparatorClassName);
      if (comparatorClass != null) {
        return comparatorClass.getDeclaredConstructor().newInstance();
      }
      LOG.warn("No Comparator class for " + comparatorClassName + ". Returning null.");
      return null;
    } catch (Exception e) {
      throw new IOException("Comparator class " + comparatorClassName + " is not instantiable", e);
    }
  }

  CellComparator createComparator() throws IOException {
    expectAtLeastMajorVersion(2);
    return createComparator(comparatorClassName);
  }
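
  // A usage sketch tying the pieces together (hypothetical variable names):
  //   FixedFileTrailer trailer = FixedFileTrailer.readFromStream(in, fileSize);
  //   CellComparator comparator = trailer.createComparator();
  // Note that comparator may be null if the file was written with a raw-bytes comparator; see
  // getComparatorClass(String) above.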

  public long getUncompressedDataIndexSize() {
    return uncompressedDataIndexSize;
  }

  public void setUncompressedDataIndexSize(long uncompressedDataIndexSize) {
    expectAtLeastMajorVersion(2);
    this.uncompressedDataIndexSize = uncompressedDataIndexSize;
  }

  public byte[] getEncryptionKey() {
    // This is a v3 feature but if reading a v2 file the encryptionKey will just be null,
    // which is fine for this feature.
    expectAtLeastMajorVersion(2);
    return encryptionKey;
  }

  public void setEncryptionKey(byte[] keyBytes) {
    this.encryptionKey = keyBytes;
  }

  /**
   * Extracts the major version from 4-byte serialized version data. The major version is held in
   * the three least significant bytes.
   */
  private static int extractMajorVersion(int serializedVersion) {
    return (serializedVersion & 0x00ffffff);
  }

  /**
   * Extracts the minor version from 4-byte serialized version data. The minor version is held in
   * the most significant byte.
   */
  private static int extractMinorVersion(int serializedVersion) {
    return (serializedVersion >>> 24);
  }

  /**
   * Create a 4-byte serialized version number by combining the minor and major version numbers.
   */
  static int materializeVersion(int majorVersion, int minorVersion) {
    return ((majorVersion & 0x00ffffff) | (minorVersion << 24));
  }
}