/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.regionserver;

import com.google.errorprone.annotations.RestrictedApi;
import java.io.IOException;
import java.io.InterruptedIOException;
import java.net.InetSocketAddress;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.Map.Entry;
import java.util.NavigableSet;
import java.util.Optional;
import java.util.OptionalDouble;
import java.util.OptionalInt;
import java.util.OptionalLong;
import java.util.Set;
import java.util.concurrent.Callable;
import java.util.concurrent.CompletionService;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.ExecutorCompletionService;
import java.util.concurrent.Future;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.concurrent.atomic.AtomicLong;
import java.util.concurrent.atomic.LongAdder;
import java.util.concurrent.locks.ReentrantLock;
import java.util.function.Consumer;
import java.util.function.Supplier;
import java.util.function.ToLongFunction;
import java.util.stream.Collectors;
import java.util.stream.LongStream;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsAction;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.CellComparator;
import org.apache.hadoop.hbase.CellUtil;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.InnerStoreCellComparator;
import org.apache.hadoop.hbase.MemoryCompactionPolicy;
import org.apache.hadoop.hbase.MetaCellComparator;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.backup.FailedArchiveException;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.RegionInfo;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.conf.ConfigurationManager;
import org.apache.hadoop.hbase.conf.PropagatingConfigurationObserver;
import org.apache.hadoop.hbase.coprocessor.ReadOnlyConfiguration;
import org.apache.hadoop.hbase.io.HeapSize;
import org.apache.hadoop.hbase.io.hfile.CacheConfig;
import org.apache.hadoop.hbase.io.hfile.HFile;
import org.apache.hadoop.hbase.io.hfile.HFileDataBlockEncoder;
import org.apache.hadoop.hbase.io.hfile.HFileDataBlockEncoderImpl;
import org.apache.hadoop.hbase.io.hfile.HFileScanner;
import org.apache.hadoop.hbase.io.hfile.InvalidHFileException;
import org.apache.hadoop.hbase.monitoring.MonitoredTask;
import org.apache.hadoop.hbase.quotas.RegionSizeStore;
import org.apache.hadoop.hbase.regionserver.compactions.CompactionContext;
import org.apache.hadoop.hbase.regionserver.compactions.CompactionLifeCycleTracker;
import org.apache.hadoop.hbase.regionserver.compactions.CompactionProgress;
import org.apache.hadoop.hbase.regionserver.compactions.CompactionRequestImpl;
import org.apache.hadoop.hbase.regionserver.compactions.OffPeakHours;
import org.apache.hadoop.hbase.regionserver.querymatcher.ScanQueryMatcher;
import org.apache.hadoop.hbase.regionserver.throttle.ThroughputController;
import org.apache.hadoop.hbase.regionserver.wal.WALUtil;
import org.apache.hadoop.hbase.security.EncryptionUtil;
import org.apache.hadoop.hbase.security.User;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.ClassSize;
import org.apache.hadoop.hbase.util.CommonFSUtils;
import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
import org.apache.hadoop.hbase.util.Pair;
import org.apache.hadoop.hbase.util.ReflectionUtils;
import org.apache.hadoop.util.StringUtils;
import org.apache.hadoop.util.StringUtils.TraditionalBinaryPrefix;
import org.apache.yetus.audience.InterfaceAudience;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import org.apache.hbase.thirdparty.com.google.common.base.Preconditions;
import org.apache.hbase.thirdparty.com.google.common.collect.ImmutableCollection;
import org.apache.hbase.thirdparty.com.google.common.collect.ImmutableList;
import org.apache.hbase.thirdparty.com.google.common.collect.Lists;
import org.apache.hbase.thirdparty.com.google.common.collect.Maps;
import org.apache.hbase.thirdparty.org.apache.commons.collections4.CollectionUtils;
import org.apache.hbase.thirdparty.org.apache.commons.collections4.IterableUtils;

import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
import org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos.CompactionDescriptor;

/**
 * A Store holds a column family in a Region. It is a memstore and a set of zero or more
 * StoreFiles, which stretch backwards over time.
 * <p>
 * There's no reason to consider append-logging at this level; all logging and locking is handled
 * at the HRegion level. Store just provides services to manage sets of StoreFiles. One of the most
 * important of those services is compaction, where files are aggregated once they pass a
 * configurable threshold.
 * <p>
 * Locking and transactions are handled at a higher level. This API should not be called directly
 * but by an HRegion manager.
 */
@InterfaceAudience.Private
public class HStore
  implements Store, HeapSize, StoreConfigInformation, PropagatingConfigurationObserver {
  public static final String MEMSTORE_CLASS_NAME = "hbase.regionserver.memstore.class";
  public static final String COMPACTCHECKER_INTERVAL_MULTIPLIER_KEY =
    "hbase.server.compactchecker.interval.multiplier";
  public static final String BLOCKING_STOREFILES_KEY = "hbase.hstore.blockingStoreFiles";
  public static final String BLOCK_STORAGE_POLICY_KEY = "hbase.hstore.block.storage.policy";
  // "NONE" is not a valid storage policy and means we defer the policy to HDFS
  public static final String DEFAULT_BLOCK_STORAGE_POLICY = "NONE";
  public static final int DEFAULT_COMPACTCHECKER_INTERVAL_MULTIPLIER = 1000;
  public static final int DEFAULT_BLOCKING_STOREFILE_COUNT = 16;

  // HBASE-24428 : Update compaction priority for recently split daughter regions
  // so as to prioritize their compaction.
  // Any compaction candidate with higher priority than compaction of newly split daughter regions
  // should have priority value < (Integer.MIN_VALUE + 1000)
  private static final int SPLIT_REGION_COMPACTION_PRIORITY = Integer.MIN_VALUE + 1000;

  private static final Logger LOG = LoggerFactory.getLogger(HStore.class);

  protected final MemStore memstore;
  // This stores directory in the filesystem.
  private final HRegion region;
  protected Configuration conf;
  private long lastCompactSize = 0;
  volatile boolean forceMajor = false;
  private AtomicLong storeSize = new AtomicLong();
  private AtomicLong totalUncompressedBytes = new AtomicLong();
  private LongAdder memstoreOnlyRowReadsCount = new LongAdder();
  // rows that have cells from both memstore and files (or only files)
  private LongAdder mixedRowReadsCount = new LongAdder();

  /**
   * Lock specific to archiving compacted store files. This avoids races around the combination of
   * retrieving the list of compacted files and moving them to the archive directory. Since this is
   * usually a background process (other than on close), we don't want to handle this with the
   * store write lock, which would block readers and degrade performance. Locked by:
   * <ul>
   * <li>CompactedHFilesDispatchHandler via closeAndArchiveCompactedFiles()</li>
   * <li>close()</li>
   * </ul>
   */
  final ReentrantLock archiveLock = new ReentrantLock();

  private final boolean verifyBulkLoads;

  /**
   * Use this counter to track concurrent puts. If TRACE-log is enabled and we are over the
   * threshold set by hbase.region.store.parallel.put.print.threshold (default is 50), we will log
   * a message identifying the Store experiencing this high level of concurrency.
   */
  private final AtomicInteger currentParallelPutCount = new AtomicInteger(0);
  private final int parallelPutCountPrintThreshold;

  private ScanInfo scanInfo;

  // All access must be synchronized.
  // TODO: ideally, this should be part of storeFileManager, as we keep passing this to it.
  private final List<HStoreFile> filesCompacting = Lists.newArrayList();

  // All access must be synchronized.
  private final Set<ChangedReadersObserver> changedReaderObservers =
    Collections.newSetFromMap(new ConcurrentHashMap<ChangedReadersObserver, Boolean>());

  private HFileDataBlockEncoder dataBlockEncoder;

  final StoreEngine<?, ?, ?, ?> storeEngine;

  private static final AtomicBoolean offPeakCompactionTracker = new AtomicBoolean();
  private volatile OffPeakHours offPeakHours;

  private static final int DEFAULT_FLUSH_RETRIES_NUMBER = 10;
  private int flushRetriesNumber;
  private int pauseTime;

  private long blockingFileCount;
  private int compactionCheckMultiplier;

  private AtomicLong flushedCellsCount = new AtomicLong();
  private AtomicLong compactedCellsCount = new AtomicLong();
  private AtomicLong majorCompactedCellsCount = new AtomicLong();
  private AtomicLong flushedCellsSize = new AtomicLong();
  private AtomicLong flushedOutputFileSize = new AtomicLong();
  private AtomicLong compactedCellsSize = new AtomicLong();
  private AtomicLong majorCompactedCellsSize = new AtomicLong();

  private final StoreContext storeContext;

  // Used to track the store files which are currently being written. For compaction, if we want
  // to compact store files [a, b, c] to [d], then here we will record 'd'. We also use it to
  // track the store files being written when flushing.
  // Notice that the creation happens in the background compaction or flush thread and we will get
  // the files in another thread, so it needs to be thread safe.
  private static final class StoreFileWriterCreationTracker implements Consumer<Path> {

    private final Set<Path> files = Collections.newSetFromMap(new ConcurrentHashMap<>());

    @Override
    public void accept(Path t) {
      files.add(t);
    }

    public Set<Path> get() {
      return Collections.unmodifiableSet(files);
    }
  }

  // We may have multiple compactions running at the same time, and flush can also happen at the
  // same time, so here we need to use a collection, and the collection needs to be thread safe.
  // The implementation of StoreFileWriterCreationTracker is very simple and we are not likely to
  // implement hashCode or equals for it, so here we just use ConcurrentHashMap. Change to
  // IdentityHashMap if later we want to implement hashCode or equals.
  private final Set<StoreFileWriterCreationTracker> storeFileWriterCreationTrackers =
    Collections.newSetFromMap(new ConcurrentHashMap<>());

  // For SFT implementations where we write a tmp store file first, we do not need to clean up
  // the broken store files under the data directory, which means we do not need to track the
  // store file writer creation. So here we abstract a factory to return different trackers for
  // different SFT implementations.
  private final Supplier<StoreFileWriterCreationTracker> storeFileWriterCreationTrackerFactory;

  private final boolean warmup;

  /**
   * Constructor
   * @param family    HColumnDescriptor for this column
   * @param confParam configuration object for this Store. Can be null.
   */
  protected HStore(final HRegion region, final ColumnFamilyDescriptor family,
    final Configuration confParam, boolean warmup) throws IOException {
    this.conf = StoreUtils.createStoreConfiguration(confParam, region.getTableDescriptor(), family);

    this.region = region;
    this.storeContext = initializeStoreContext(family);

    // Assemble the store's home directory and ensure it exists.
    region.getRegionFileSystem().createStoreDir(family.getNameAsString());

    // set block storage policy for store directory
    String policyName = family.getStoragePolicy();
    if (null == policyName) {
      policyName = this.conf.get(BLOCK_STORAGE_POLICY_KEY, DEFAULT_BLOCK_STORAGE_POLICY);
    }
    region.getRegionFileSystem().setStoragePolicy(family.getNameAsString(), policyName.trim());

    this.dataBlockEncoder = new HFileDataBlockEncoderImpl(family.getDataBlockEncoding());

    // used by ScanQueryMatcher
    long timeToPurgeDeletes = Math.max(conf.getLong("hbase.hstore.time.to.purge.deletes", 0), 0);
    LOG.trace("Time to purge deletes set to {}ms in {}", timeToPurgeDeletes, this);
    // Get TTL
    long ttl = determineTTLFromFamily(family);
    // Why not just pass a HColumnDescriptor in here altogether? Even if we have to clone it?
    scanInfo =
      new ScanInfo(conf, family, ttl, timeToPurgeDeletes, this.storeContext.getComparator());
    this.memstore = getMemstore();

    this.offPeakHours = OffPeakHours.getInstance(conf);

    this.verifyBulkLoads = conf.getBoolean("hbase.hstore.bulkload.verify", false);

    this.blockingFileCount = conf.getInt(BLOCKING_STOREFILES_KEY, DEFAULT_BLOCKING_STOREFILE_COUNT);
    this.compactionCheckMultiplier = conf.getInt(COMPACTCHECKER_INTERVAL_MULTIPLIER_KEY,
      DEFAULT_COMPACTCHECKER_INTERVAL_MULTIPLIER);
    if (this.compactionCheckMultiplier <= 0) {
      LOG.error("Compaction check period multiplier must be positive, setting default: {}",
        DEFAULT_COMPACTCHECKER_INTERVAL_MULTIPLIER);
      this.compactionCheckMultiplier = DEFAULT_COMPACTCHECKER_INTERVAL_MULTIPLIER;
    }

    this.warmup = warmup;
    this.storeEngine = createStoreEngine(this, this.conf, region.getCellComparator());
    storeEngine.initialize(warmup);
    // If the store engine requires writing to a tmp dir first, we just return null, which
    // indicates that we do not need to track the creation of store file writers; otherwise we
    // return a new StoreFileWriterCreationTracker.
    this.storeFileWriterCreationTrackerFactory = storeEngine.requireWritingToTmpDirFirst()
      ? () -> null
      : () -> new StoreFileWriterCreationTracker();
    refreshStoreSizeAndTotalBytes();

    flushRetriesNumber =
      conf.getInt("hbase.hstore.flush.retries.number", DEFAULT_FLUSH_RETRIES_NUMBER);
    pauseTime = conf.getInt(HConstants.HBASE_SERVER_PAUSE, HConstants.DEFAULT_HBASE_SERVER_PAUSE);
    if (flushRetriesNumber <= 0) {
      throw new IllegalArgumentException(
        "hbase.hstore.flush.retries.number must be > 0, not " + flushRetriesNumber);
    }

    int confPrintThreshold =
      this.conf.getInt("hbase.region.store.parallel.put.print.threshold", 50);
    if (confPrintThreshold < 10) {
      confPrintThreshold = 10;
    }
    this.parallelPutCountPrintThreshold = confPrintThreshold;

    LOG.info(
      "Store={}, memstore type={}, storagePolicy={}, verifyBulkLoads={}, "
        + "parallelPutCountPrintThreshold={}, encoding={}, compression={}",
      this, memstore.getClass().getSimpleName(), policyName, verifyBulkLoads,
      parallelPutCountPrintThreshold, family.getDataBlockEncoding(), family.getCompressionType());
  }

  private StoreContext initializeStoreContext(ColumnFamilyDescriptor family) throws IOException {
    return new StoreContext.Builder().withBlockSize(family.getBlocksize())
      .withEncryptionContext(EncryptionUtil.createEncryptionContext(conf, family))
      .withBloomType(family.getBloomFilterType()).withCacheConfig(createCacheConf(family))
      .withCellComparator(region.getTableDescriptor().isMetaTable() || conf
        .getBoolean(HRegion.USE_META_CELL_COMPARATOR, HRegion.DEFAULT_USE_META_CELL_COMPARATOR)
          ? MetaCellComparator.META_COMPARATOR
          : InnerStoreCellComparator.INNER_STORE_COMPARATOR)
      .withColumnFamilyDescriptor(family).withCompactedFilesSupplier(this::getCompactedFiles)
      .withRegionFileSystem(region.getRegionFileSystem())
      .withFavoredNodesSupplier(this::getFavoredNodes)
      .withFamilyStoreDirectoryPath(
        region.getRegionFileSystem().getStoreDir(family.getNameAsString()))
      .withRegionCoprocessorHost(region.getCoprocessorHost()).build();
  }

  private InetSocketAddress[] getFavoredNodes() {
    InetSocketAddress[] favoredNodes = null;
    if (region.getRegionServerServices() != null) {
      favoredNodes = region.getRegionServerServices()
        .getFavoredNodesForRegion(region.getRegionInfo().getEncodedName());
    }
    return favoredNodes;
  }

  /** Returns MemStore Instance to use in this store. */
  private MemStore getMemstore() {
    MemStore ms = null;
    // Check if in-memory-compaction configured. Note MemoryCompactionPolicy is an enum!
    MemoryCompactionPolicy inMemoryCompaction = null;
    if (this.getTableName().isSystemTable()) {
      inMemoryCompaction = MemoryCompactionPolicy
        .valueOf(conf.get("hbase.systemtables.compacting.memstore.type", "NONE").toUpperCase());
    } else {
      inMemoryCompaction = getColumnFamilyDescriptor().getInMemoryCompaction();
    }
    if (inMemoryCompaction == null) {
      inMemoryCompaction =
        MemoryCompactionPolicy.valueOf(conf.get(CompactingMemStore.COMPACTING_MEMSTORE_TYPE_KEY,
          CompactingMemStore.COMPACTING_MEMSTORE_TYPE_DEFAULT).toUpperCase());
    }

    switch (inMemoryCompaction) {
      case NONE:
        Class<? extends MemStore> memStoreClass =
          conf.getClass(MEMSTORE_CLASS_NAME, DefaultMemStore.class, MemStore.class);
        ms = ReflectionUtils.newInstance(memStoreClass,
          new Object[] { conf, getComparator(), this.getHRegion().getRegionServicesForStores() });
        break;
      default:
        Class<? extends CompactingMemStore> compactingMemStoreClass =
          conf.getClass(MEMSTORE_CLASS_NAME, CompactingMemStore.class, CompactingMemStore.class);
        ms =
          ReflectionUtils.newInstance(compactingMemStoreClass, new Object[] { conf, getComparator(),
            this, this.getHRegion().getRegionServicesForStores(), inMemoryCompaction });
    }
    return ms;
  }

  /**
   * Creates the cache config.
   * @param family The current column family.
   */
  protected CacheConfig createCacheConf(final ColumnFamilyDescriptor family) {
    CacheConfig cacheConf = new CacheConfig(conf, family, region.getBlockCache(),
      region.getRegionServicesForStores().getByteBuffAllocator());
    LOG.info("Created cacheConfig: {}, for column family {} of region {} ", cacheConf,
      family.getNameAsString(), region.getRegionInfo().getEncodedName());
    return cacheConf;
  }

  /**
   * Creates the store engine configured for the given Store.
   * @param store        The store. An unfortunate dependency needed due to it being passed to
   *                     coprocessors via the compactor.
   * @param conf         Store configuration.
   * @param kvComparator KVComparator for storeFileManager.
   * @return StoreEngine to use.
   */
  protected StoreEngine<?, ?, ?, ?> createStoreEngine(HStore store, Configuration conf,
    CellComparator kvComparator) throws IOException {
    return StoreEngine.create(store, conf, kvComparator);
  }

  /** Returns TTL in milliseconds of the specified family */
  public static long determineTTLFromFamily(final ColumnFamilyDescriptor family) {
    // HCD.getTimeToLive returns ttl in seconds. Convert to milliseconds.
    long ttl = family.getTimeToLive();
    if (ttl == HConstants.FOREVER) {
      // Default is unlimited ttl.
      ttl = Long.MAX_VALUE;
    } else if (ttl == -1) {
      ttl = Long.MAX_VALUE;
    } else {
      // Second -> ms adjust for user data
      ttl *= 1000;
    }
    return ttl;
  }

  StoreContext getStoreContext() {
    return storeContext;
  }

  @Override
  public String getColumnFamilyName() {
    return this.storeContext.getFamily().getNameAsString();
  }

  @Override
  public TableName getTableName() {
    return this.getRegionInfo().getTable();
  }

  @Override
  public FileSystem getFileSystem() {
    return storeContext.getRegionFileSystem().getFileSystem();
  }

  public HRegionFileSystem getRegionFileSystem() {
    return storeContext.getRegionFileSystem();
  }

  /* Implementation of StoreConfigInformation */
  @Override
  public long getStoreFileTtl() {
    // TTL only applies if there's no MIN_VERSIONs setting on the column.
    return (this.scanInfo.getMinVersions() == 0) ? this.scanInfo.getTtl() : Long.MAX_VALUE;
  }

  @Override
  public long getMemStoreFlushSize() {
    // TODO: Why is this in here? The flushsize of the region rather than the store? St.Ack
    return this.region.memstoreFlushSize;
  }

  @Override
  public MemStoreSize getFlushableSize() {
    return this.memstore.getFlushableSize();
  }

  @Override
  public MemStoreSize getSnapshotSize() {
    return this.memstore.getSnapshotSize();
  }

  @Override
  public long getCompactionCheckMultiplier() {
    return this.compactionCheckMultiplier;
  }

  @Override
  public long getBlockingFileCount() {
    return blockingFileCount;
  }
  /* End implementation of StoreConfigInformation */

  @Override
  public ColumnFamilyDescriptor getColumnFamilyDescriptor() {
    return this.storeContext.getFamily();
  }

  @Override
  public OptionalLong getMaxSequenceId() {
    return StoreUtils.getMaxSequenceIdInList(this.getStorefiles());
  }

  @Override
  public OptionalLong getMaxMemStoreTS() {
    return StoreUtils.getMaxMemStoreTSInList(this.getStorefiles());
  }

  /** Returns the data block encoder */
  public HFileDataBlockEncoder getDataBlockEncoder() {
    return dataBlockEncoder;
  }

  /**
   * Should be used only in tests.
   * @param blockEncoder the block delta encoder to use
   */
  void setDataBlockEncoderInTest(HFileDataBlockEncoder blockEncoder) {
    this.dataBlockEncoder = blockEncoder;
  }

  private void postRefreshStoreFiles() throws IOException {
    // Advance the memstore read point to be at least the new store files seqIds so that
    // readers might pick it up. This assumes that the store is not getting any writes (otherwise
    // in-flight transactions might be made visible)
    getMaxSequenceId().ifPresent(region.getMVCC()::advanceTo);
    refreshStoreSizeAndTotalBytes();
  }

  @Override
  public void refreshStoreFiles() throws IOException {
    storeEngine.refreshStoreFiles();
    postRefreshStoreFiles();
  }

  /**
   * Replaces the store files that the store has with the given files. Mainly used by secondary
   * region replicas to keep up to date with the primary region files.
   */
  public void refreshStoreFiles(Collection<String> newFiles) throws IOException {
    storeEngine.refreshStoreFiles(newFiles);
    postRefreshStoreFiles();
  }

  /**
   * This message intends to inform the MemStore that the next coming updates are going to be part
   * of replaying edits from the WAL.
   */
  public void startReplayingFromWAL() {
    this.memstore.startReplayingFromWAL();
  }

  /**
   * This message intends to inform the MemStore that the replaying of edits from the WAL is done.
   */
  public void stopReplayingFromWAL() {
    this.memstore.stopReplayingFromWAL();
  }

  /**
   * Adds a value to the memstore
   */
  public void add(final Cell cell, MemStoreSizing memstoreSizing) {
    storeEngine.readLock();
    try {
      if (this.currentParallelPutCount.getAndIncrement() > this.parallelPutCountPrintThreshold) {
        LOG.trace("tableName={}, encodedName={}, columnFamilyName={} is too busy!",
          this.getTableName(), this.getRegionInfo().getEncodedName(), this.getColumnFamilyName());
      }
      this.memstore.add(cell, memstoreSizing);
    } finally {
      storeEngine.readUnlock();
      currentParallelPutCount.decrementAndGet();
    }
  }

  /**
   * Adds the specified cells to the memstore
   */
  public void add(final Iterable<Cell> cells, MemStoreSizing memstoreSizing) {
    storeEngine.readLock();
    try {
      if (this.currentParallelPutCount.getAndIncrement() > this.parallelPutCountPrintThreshold) {
        LOG.trace("tableName={}, encodedName={}, columnFamilyName={} is too busy!",
          this.getTableName(), this.getRegionInfo().getEncodedName(), this.getColumnFamilyName());
      }
      memstore.add(cells, memstoreSizing);
    } finally {
      storeEngine.readUnlock();
      currentParallelPutCount.decrementAndGet();
    }
  }

  @Override
  public long timeOfOldestEdit() {
    return memstore.timeOfOldestEdit();
  }

  /** Returns All store files. */
  @Override
  public Collection<HStoreFile> getStorefiles() {
    return this.storeEngine.getStoreFileManager().getStorefiles();
  }

  @Override
  public Collection<HStoreFile> getCompactedFiles() {
    return this.storeEngine.getStoreFileManager().getCompactedfiles();
  }

  /**
   * This throws a WrongRegionException if the HFile does not fit in this region, or an
   * InvalidHFileException if the HFile is not valid.
   */
  public void assertBulkLoadHFileOk(Path srcPath) throws IOException {
    HFile.Reader reader = null;
    try {
      LOG.info("Validating hfile at " + srcPath + " for inclusion in " + this);
      FileSystem srcFs = srcPath.getFileSystem(conf);
      srcFs.access(srcPath, FsAction.READ_WRITE);
      reader = HFile.createReader(srcFs, srcPath, getCacheConfig(), isPrimaryReplicaStore(), conf);

      Optional<byte[]> firstKey = reader.getFirstRowKey();
      Preconditions.checkState(firstKey.isPresent(), "First key can not be null");
      Optional<Cell> lk = reader.getLastKey();
      Preconditions.checkState(lk.isPresent(), "Last key can not be null");
      byte[] lastKey = CellUtil.cloneRow(lk.get());

      if (LOG.isDebugEnabled()) {
        LOG.debug("HFile bounds: first=" + Bytes.toStringBinary(firstKey.get()) + " last="
          + Bytes.toStringBinary(lastKey));
        LOG.debug("Region bounds: first=" + Bytes.toStringBinary(getRegionInfo().getStartKey())
          + " last=" + Bytes.toStringBinary(getRegionInfo().getEndKey()));
      }

      if (!this.getRegionInfo().containsRange(firstKey.get(), lastKey)) {
        throw new WrongRegionException("Bulk load file " + srcPath.toString()
          + " does not fit inside region " + this.getRegionInfo().getRegionNameAsString());
      }

      if (
        reader.length()
            > conf.getLong(HConstants.HREGION_MAX_FILESIZE, HConstants.DEFAULT_MAX_FILE_SIZE)
      ) {
        LOG.warn("Trying to bulk load hfile " + srcPath + " with size: " + reader.length()
          + " bytes can be problematic as it may lead to oversplitting.");
      }

      if (verifyBulkLoads) {
        long verificationStartTime = EnvironmentEdgeManager.currentTime();
        LOG.info("Full verification started for bulk load hfile: {}", srcPath);
        Cell prevCell = null;
        HFileScanner scanner = reader.getScanner(conf, false, false, false);
        scanner.seekTo();
        do {
          Cell cell = scanner.getCell();
          if (prevCell != null) {
            if (getComparator().compareRows(prevCell, cell) > 0) {
              throw new InvalidHFileException("Previous row is greater than" + " current row: path="
                + srcPath + " previous=" + CellUtil.getCellKeyAsString(prevCell) + " current="
                + CellUtil.getCellKeyAsString(cell));
            }
            if (CellComparator.getInstance().compareFamilies(prevCell, cell) != 0) {
              throw new InvalidHFileException("Previous key had different"
                + " family compared to current key: path=" + srcPath + " previous="
                + Bytes.toStringBinary(prevCell.getFamilyArray(), prevCell.getFamilyOffset(),
                  prevCell.getFamilyLength())
                + " current=" + Bytes.toStringBinary(cell.getFamilyArray(), cell.getFamilyOffset(),
                  cell.getFamilyLength()));
            }
          }
          prevCell = cell;
        } while (scanner.next());
        LOG.info("Full verification complete for bulk load hfile: " + srcPath.toString() + " took "
          + (EnvironmentEdgeManager.currentTime() - verificationStartTime) + " ms");
      }
    } finally {
      if (reader != null) {
        reader.close();
      }
    }
  }

  /**
   * This method should only be called from Region. It is assumed that the ranges of values in the
   * HFile fit within the store's assigned region. (assertBulkLoadHFileOk checks this)
   * @param seqNum sequence Id associated with the HFile
   */
  public Pair<Path, Path> preBulkLoadHFile(String srcPathStr, long seqNum) throws IOException {
    Path srcPath = new Path(srcPathStr);
    return getRegionFileSystem().bulkLoadStoreFile(getColumnFamilyName(), srcPath, seqNum);
  }

  public Path bulkLoadHFile(byte[] family, String srcPathStr, Path dstPath) throws IOException {
    Path srcPath = new Path(srcPathStr);
    try {
      getRegionFileSystem().commitStoreFile(srcPath, dstPath);
    } finally {
      if (this.getCoprocessorHost() != null) {
        this.getCoprocessorHost().postCommitStoreFile(family, srcPath, dstPath);
      }
    }

    LOG.info("Loaded HFile " + srcPath + " into " + this + " as " + dstPath
      + " - updating store file list.");

    HStoreFile sf = storeEngine.createStoreFileAndReader(dstPath);
    bulkLoadHFile(sf);

    LOG.info("Successfully loaded {} into {} (new location: {})", srcPath, this, dstPath);

    return dstPath;
  }

  public void bulkLoadHFile(StoreFileInfo fileInfo) throws IOException {
    HStoreFile sf = storeEngine.createStoreFileAndReader(fileInfo);
    bulkLoadHFile(sf);
  }

  private void bulkLoadHFile(HStoreFile sf) throws IOException {
    StoreFileReader r = sf.getReader();
    this.storeSize.addAndGet(r.length());
    this.totalUncompressedBytes.addAndGet(r.getTotalUncompressedBytes());
    storeEngine.addStoreFiles(Lists.newArrayList(sf), () -> {
    });
    LOG.info("Loaded HFile " + sf.getFileInfo() + " into " + this);
    if (LOG.isTraceEnabled()) {
      String traceMessage = "BULK LOAD time,size,store size,store files ["
        + EnvironmentEdgeManager.currentTime() + "," + r.length() + "," + storeSize + ","
        + storeEngine.getStoreFileManager().getStorefileCount() + "]";
      LOG.trace(traceMessage);
    }
  }

  private ImmutableCollection<HStoreFile> closeWithoutLock() throws IOException {
    memstore.close();
    // Clear so metrics doesn't find them.
    ImmutableCollection<HStoreFile> result = storeEngine.getStoreFileManager().clearFiles();
    Collection<HStoreFile> compactedfiles = storeEngine.getStoreFileManager().clearCompactedFiles();
    // clear the compacted files
    if (CollectionUtils.isNotEmpty(compactedfiles)) {
      removeCompactedfiles(compactedfiles,
        getCacheConfig() != null ? getCacheConfig().shouldEvictOnClose() : true);
    }
    if (!result.isEmpty()) {
      // initialize the thread pool for closing store files in parallel.
      ThreadPoolExecutor storeFileCloserThreadPool =
        this.region.getStoreFileOpenAndCloseThreadPool("StoreFileCloser-"
          + this.region.getRegionInfo().getEncodedName() + "-" + this.getColumnFamilyName());

      // close each store file in parallel
      CompletionService<Void> completionService =
        new ExecutorCompletionService<>(storeFileCloserThreadPool);
      for (HStoreFile f : result) {
        completionService.submit(new Callable<Void>() {
          @Override
          public Void call() throws IOException {
            boolean evictOnClose =
              getCacheConfig() != null ? getCacheConfig().shouldEvictOnClose() : true;
            f.closeStoreFile(!warmup && evictOnClose);
            return null;
          }
        });
      }

      IOException ioe = null;
      try {
        for (int i = 0; i < result.size(); i++) {
          try {
            Future<Void> future = completionService.take();
            future.get();
          } catch (InterruptedException e) {
            if (ioe == null) {
              ioe = new InterruptedIOException();
              ioe.initCause(e);
            }
          } catch (ExecutionException e) {
            if (ioe == null) {
              ioe = new IOException(e.getCause());
            }
          }
        }
      } finally {
        storeFileCloserThreadPool.shutdownNow();
      }
      if (ioe != null) {
        throw ioe;
      }
    }
    LOG.trace("Closed {}", this);
    return result;
  }

  /**
   * Close all the readers. We don't need to worry about subsequent requests because the Region
   * holds a write lock that will prevent any more reads or writes.
   * @return the {@link StoreFile StoreFiles} that were previously being used.
   * @throws IOException on failure
   */
  public ImmutableCollection<HStoreFile> close() throws IOException {
    // findbugs cannot recognize that storeEngine.writeLock is just a lock operation, so it will
    // report UL_UNRELEASED_LOCK_EXCEPTION_PATH; here we have to use two try-finally blocks.
    // Change later if findbugs becomes smarter in the future.
    this.archiveLock.lock();
    try {
      this.storeEngine.writeLock();
      try {
        return closeWithoutLock();
      } finally {
        this.storeEngine.writeUnlock();
      }
    } finally {
      this.archiveLock.unlock();
    }
  }

  /**
   * Write out current snapshot. Presumes {@code StoreFlusherImpl.prepare()} has been called
   * previously.
   * @param logCacheFlushId flush sequence number
   * @return The path name of the tmp file to which the store was flushed
   * @throws IOException if exception occurs during process
   */
  protected List<Path> flushCache(final long logCacheFlushId, MemStoreSnapshot snapshot,
    MonitoredTask status, ThroughputController throughputController, FlushLifeCycleTracker tracker,
    Consumer<Path> writerCreationTracker) throws IOException {
    // If an exception happens flushing, we let it out without clearing
    // the memstore snapshot. The old snapshot will be returned when we say
    // 'snapshot', the next time flush comes around.
    // Retry after catching exception when flushing, otherwise server will abort
    // itself
    StoreFlusher flusher = storeEngine.getStoreFlusher();
    IOException lastException = null;
    for (int i = 0; i < flushRetriesNumber; i++) {
      try {
        List<Path> pathNames = flusher.flushSnapshot(snapshot, logCacheFlushId, status,
          throughputController, tracker, writerCreationTracker);
        Path lastPathName = null;
        try {
          for (Path pathName : pathNames) {
            lastPathName = pathName;
            storeEngine.validateStoreFile(pathName);
          }
          return pathNames;
        } catch (Exception e) {
          LOG.warn("Failed validating store file {}, retrying num={}", lastPathName, i, e);
          if (e instanceof IOException) {
            lastException = (IOException) e;
          } else {
            lastException = new IOException(e);
          }
        }
      } catch (IOException e) {
        LOG.warn("Failed flushing store file for {}, retrying num={}", this, i, e);
        lastException = e;
      }
      if (lastException != null && i < (flushRetriesNumber - 1)) {
        try {
          Thread.sleep(pauseTime);
        } catch (InterruptedException e) {
          IOException iie = new InterruptedIOException();
          iie.initCause(e);
          throw iie;
        }
      }
    }
    throw lastException;
  }

  public HStoreFile tryCommitRecoveredHFile(Path path) throws IOException {
    LOG.info("Validating recovered hfile at {} for inclusion in store {}", path, this);
    FileSystem srcFs = path.getFileSystem(conf);
    srcFs.access(path, FsAction.READ_WRITE);
    try (HFile.Reader reader =
      HFile.createReader(srcFs, path, getCacheConfig(), isPrimaryReplicaStore(), conf)) {
      Optional<byte[]> firstKey = reader.getFirstRowKey();
      Preconditions.checkState(firstKey.isPresent(), "First key can not be null");
      Optional<Cell> lk = reader.getLastKey();
      Preconditions.checkState(lk.isPresent(), "Last key can not be null");
      byte[] lastKey = CellUtil.cloneRow(lk.get());
      if (!this.getRegionInfo().containsRange(firstKey.get(), lastKey)) {
        throw new WrongRegionException("Recovered hfile " + path.toString()
          + " does not fit inside region " + this.getRegionInfo().getRegionNameAsString());
      }
    }

    Path dstPath = getRegionFileSystem().commitStoreFile(getColumnFamilyName(), path);
    HStoreFile sf = storeEngine.createStoreFileAndReader(dstPath);
    StoreFileReader r = sf.getReader();
    this.storeSize.addAndGet(r.length());
    this.totalUncompressedBytes.addAndGet(r.getTotalUncompressedBytes());

    storeEngine.addStoreFiles(Lists.newArrayList(sf), () -> {
    });

    LOG.info("Loaded recovered hfile to {}, entries={}, sequenceid={}, filesize={}", sf,
      r.getEntries(), r.getSequenceID(), TraditionalBinaryPrefix.long2String(r.length(), "B", 1));
    return sf;
  }

  private long getTotalSize(Collection<HStoreFile> sfs) {
    return sfs.stream().mapToLong(sf -> sf.getReader().length()).sum();
  }

  private boolean completeFlush(final List<HStoreFile> sfs, long snapshotId) throws IOException {
    // NOTE: we should keep the clearSnapshot method inside the write lock because clearSnapshot
    // may close {@link DefaultMemStore#snapshot}, which may be used by
    // {@link DefaultMemStore#getScanners}.
    storeEngine.addStoreFiles(sfs,
      // NOTE: here we must increase the refCount for storeFiles because we would open the
      // storeFiles and get the StoreFileScanners for them in HStore.notifyChangedReadersObservers.
      // If we don't increase the refCount here, HStore.closeAndArchiveCompactedFiles called by
      // CompactedHFilesDischarger may archive the storeFiles after a concurrent compaction.
      // Because HStore.requestCompaction is under the storeEngine lock, here we increase the
      // refCount under the storeEngine lock as well. See HBASE-27519 for more details.
      snapshotId > 0 ? () -> {
        this.memstore.clearSnapshot(snapshotId);
        HStoreFile.increaseStoreFilesRefeCount(sfs);
      } : () -> {
        HStoreFile.increaseStoreFilesRefeCount(sfs);
      });
    // notify to be called here - only in case of flushes
    try {
      notifyChangedReadersObservers(sfs);
    } finally {
      HStoreFile.decreaseStoreFilesRefeCount(sfs);
    }
    if (LOG.isTraceEnabled()) {
      long totalSize = getTotalSize(sfs);
      String traceMessage = "FLUSH time,count,size,store size,store files ["
        + EnvironmentEdgeManager.currentTime() + "," + sfs.size() + "," + totalSize + ","
        + storeSize + "," + storeEngine.getStoreFileManager().getStorefileCount() + "]";
      LOG.trace(traceMessage);
    }
    return needsCompaction();
  }

  /**
   * Notify all observers that set of Readers has changed.
   */
  private void notifyChangedReadersObservers(List<HStoreFile> sfs) throws IOException {
    for (ChangedReadersObserver o : this.changedReaderObservers) {
      List<KeyValueScanner> memStoreScanners;
      this.storeEngine.readLock();
      try {
        memStoreScanners = this.memstore.getScanners(o.getReadPoint());
      } finally {
        this.storeEngine.readUnlock();
      }
      o.updateReaders(sfs, memStoreScanners);
    }
  }

  /**
   * Get all scanners with no filtering based on TTL (that happens further down the line).
   * @param cacheBlocks  cache the blocks or not
   * @param usePread     true to use pread, false if not
   * @param isCompaction true if the scanner is created for compaction
   * @param matcher      the scan query matcher
   * @param startRow     the start row
   * @param stopRow      the stop row
   * @param readPt       the read point of the current scan
   * @return all scanners for this store
   */
  public List<KeyValueScanner> getScanners(boolean cacheBlocks, boolean isGet, boolean usePread,
    boolean isCompaction, ScanQueryMatcher matcher, byte[] startRow, byte[] stopRow, long readPt)
    throws IOException {
    return getScanners(cacheBlocks, usePread, isCompaction, matcher, startRow, true, stopRow, false,
      readPt);
  }

  /**
   * Get all scanners with no filtering based on TTL (that happens further down the line).
   * @param cacheBlocks     cache the blocks or not
   * @param usePread        true to use pread, false if not
   * @param isCompaction    true if the scanner is created for compaction
   * @param matcher         the scan query matcher
   * @param startRow        the start row
   * @param includeStartRow true to include start row, false if not
   * @param stopRow         the stop row
   * @param includeStopRow  true to include stop row, false if not
   * @param readPt          the read point of the current scan
   * @return all scanners for this store
   */
  public List<KeyValueScanner> getScanners(boolean cacheBlocks, boolean usePread,
    boolean isCompaction, ScanQueryMatcher matcher, byte[] startRow, boolean includeStartRow,
    byte[] stopRow, boolean includeStopRow, long readPt) throws IOException {
    Collection<HStoreFile> storeFilesToScan;
    List<KeyValueScanner> memStoreScanners;
    this.storeEngine.readLock();
    try {
      storeFilesToScan = this.storeEngine.getStoreFileManager().getFilesForScan(startRow,
        includeStartRow, stopRow, includeStopRow);
      memStoreScanners = this.memstore.getScanners(readPt);
      // NOTE: here we must increase the refCount for storeFiles because we would open the
      // storeFiles and get the StoreFileScanners for them. If we don't increase the refCount here,
      // HStore.closeAndArchiveCompactedFiles called by CompactedHFilesDischarger may archive the
      // storeFiles after a concurrent compaction. Because HStore.requestCompaction is under the
      // storeEngine lock, here we increase the refCount under the storeEngine lock as well. See
      // HBASE-27484 for more details.
      HStoreFile.increaseStoreFilesRefeCount(storeFilesToScan);
    } finally {
      this.storeEngine.readUnlock();
    }
    try {
      // First the store file scanners

      // TODO this used to get the store files in descending order,
      // but now we get them in ascending order, which I think is
      // actually more correct, since memstore get put at the end.
      List<StoreFileScanner> sfScanners = StoreFileScanner.getScannersForStoreFiles(
        storeFilesToScan, cacheBlocks, usePread, isCompaction, false, matcher, readPt);
      List<KeyValueScanner> scanners = new ArrayList<>(sfScanners.size() + 1);
      scanners.addAll(sfScanners);
      // Then the memstore scanners
      scanners.addAll(memStoreScanners);
      return scanners;
    } catch (Throwable t) {
      clearAndClose(memStoreScanners);
      throw t instanceof IOException ? (IOException) t : new IOException(t);
    } finally {
      HStoreFile.decreaseStoreFilesRefeCount(storeFilesToScan);
    }
  }

  private static void clearAndClose(List<KeyValueScanner> scanners) {
    if (scanners == null) {
      return;
    }
    for (KeyValueScanner s : scanners) {
      s.close();
    }
    scanners.clear();
  }

  /**
   * Create scanners on the given files and if needed on the memstore with no filtering based on
   * TTL (that happens further down the line).
   * @param files                  the list of files on which the scanners have to be created
   * @param cacheBlocks            cache the blocks or not
   * @param usePread               true to use pread, false if not
   * @param isCompaction           true if the scanner is created for compaction
   * @param matcher                the scan query matcher
   * @param startRow               the start row
   * @param stopRow                the stop row
   * @param readPt                 the read point of the current scan
   * @param includeMemstoreScanner true if memstore has to be included
   * @return scanners on the given files and on the memstore if specified
   */
  public List<KeyValueScanner> getScanners(List<HStoreFile> files, boolean cacheBlocks,
    boolean isGet, boolean usePread, boolean isCompaction, ScanQueryMatcher matcher,
    byte[] startRow, byte[] stopRow, long readPt, boolean includeMemstoreScanner)
    throws IOException {
    return getScanners(files, cacheBlocks, usePread, isCompaction, matcher, startRow, true, stopRow,
      false, readPt, includeMemstoreScanner);
  }

  /**
   * Create scanners on the given files and if needed on the memstore with no filtering based on
   * TTL (that happens further down the line).
   * @param files                  the list of files on which the scanners have to be created
   * @param cacheBlocks            cache the blocks or not
   * @param usePread               true to use pread, false if not
   * @param isCompaction           true if the scanner is created for compaction
   * @param matcher                the scan query matcher
   * @param startRow               the start row
   * @param includeStartRow        true to include start row, false if not
   * @param stopRow                the stop row
   * @param includeStopRow         true to include stop row, false if not
   * @param readPt                 the read point of the current scan
   * @param includeMemstoreScanner true if memstore has to be included
   * @return scanners on the given files and on the memstore if specified
   */
  public List<KeyValueScanner> getScanners(List<HStoreFile> files, boolean cacheBlocks,
    boolean usePread, boolean isCompaction, ScanQueryMatcher matcher, byte[] startRow,
    boolean includeStartRow, byte[] stopRow, boolean includeStopRow, long readPt,
    boolean includeMemstoreScanner) throws IOException {
    List<KeyValueScanner> memStoreScanners = null;
    if (includeMemstoreScanner) {
      this.storeEngine.readLock();
      try {
        memStoreScanners = this.memstore.getScanners(readPt);
      } finally {
        this.storeEngine.readUnlock();
      }
    }
    try {
      List<StoreFileScanner> sfScanners = StoreFileScanner.getScannersForStoreFiles(files,
        cacheBlocks, usePread, isCompaction, false, matcher, readPt);
      List<KeyValueScanner> scanners = new ArrayList<>(sfScanners.size() + 1);
      scanners.addAll(sfScanners);
      // Then the memstore scanners
      if (memStoreScanners != null) {
        scanners.addAll(memStoreScanners);
      }
      return scanners;
    } catch (Throwable t) {
      clearAndClose(memStoreScanners);
      throw t instanceof IOException ? (IOException) t : new IOException(t);
    }
  }

  /**
   * @param o Observer who wants to know about changes in set of Readers
   */
  public void addChangedReaderObserver(ChangedReadersObserver o) {
    this.changedReaderObservers.add(o);
  }

  /**
   * @param o Observer no longer interested in changes in set of Readers.
   */
  public void deleteChangedReaderObserver(ChangedReadersObserver o) {
    // We don't check if observer present; it may not be (legitimately)
    this.changedReaderObservers.remove(o);
  }

  //////////////////////////////////////////////////////////////////////////////
  // Compaction
  //////////////////////////////////////////////////////////////////////////////

  /**
   * Compact the StoreFiles. This method may take some time, so the calling thread must be able to
   * block for long periods.
   * <p>
   * During this time, the Store can work as usual, getting values from StoreFiles and writing new
   * StoreFiles from the memstore. Existing StoreFiles are not destroyed until the new compacted
   * StoreFile is completely written-out to disk.
   * <p>
   * The compactLock prevents multiple simultaneous compactions. The structureLock prevents us from
   * interfering with other write operations.
   * <p>
   * We don't want to hold the structureLock for the whole time, as a compact() can be lengthy and
   * we want to allow cache-flushes during this period.
   * <p>
   * Compaction events should be idempotent, since there is no IO Fencing for the region directory
   * in hdfs. A region server might still try to complete the compaction after it lost the region.
   * That is why the following events are carefully ordered for a compaction:
   * <ol>
   * <li>Compaction writes new files under the region/.tmp directory (compaction output).</li>
   * <li>Compaction atomically moves the temporary file under the region directory.</li>
   * <li>Compaction appends a WAL edit containing the compaction input and output files. Forces
   * sync on the WAL.</li>
   * <li>Compaction deletes the input files from the region directory.</li>
   * </ol>
   * Failure conditions are handled like this:
   * <ul>
   * <li>If RS fails before 2, compaction won't complete. Even if RS lives on and finishes the
   * compaction later, it will only write the new data file to the region directory. Since we
   * already have this data, this will be idempotent, but we will have a redundant copy of the
   * data.</li>
   * <li>If RS fails between 2 and 3, the region will have a redundant copy of the data. The RS
   * that failed won't be able to finish sync() for the WAL because of lease recovery in the
   * WAL.</li>
   * <li>If RS fails after 3, the region server that opens the region will pick up the compaction
   * marker from the WAL and replay it by removing the compaction input files. The failed RS can
   * also attempt to delete those files, but the operation will be idempotent.</li>
   * </ul>
   * See HBASE-2231 for details.
   * @param compaction compaction details obtained from requestCompaction()
   * @return Storefile we compacted into or null if we failed or opted out early.
   */
  public List<HStoreFile> compact(CompactionContext compaction,
    ThroughputController throughputController, User user) throws IOException {
    assert compaction != null;
    CompactionRequestImpl cr = compaction.getRequest();
    StoreFileWriterCreationTracker writerCreationTracker =
      storeFileWriterCreationTrackerFactory.get();
    if (writerCreationTracker != null) {
      cr.setWriterCreationTracker(writerCreationTracker);
      storeFileWriterCreationTrackers.add(writerCreationTracker);
    }
    try {
      // Do all sanity checking in here if we have a valid CompactionRequestImpl
      // because we need to clean up after it on the way out in a finally
      // block below
      long compactionStartTime = EnvironmentEdgeManager.currentTime();
      assert compaction.hasSelection();
      Collection<HStoreFile> filesToCompact = cr.getFiles();
      assert !filesToCompact.isEmpty();
      synchronized (filesCompacting) {
        // sanity check: we're compacting files that this store knows about
        // TODO: change this to LOG.error() after more debugging
        Preconditions.checkArgument(filesCompacting.containsAll(filesToCompact));
      }

      // Ready to go. Have list of files to compact.
      LOG.info("Starting compaction of " + filesToCompact + " into tmpdir="
        + getRegionFileSystem().getTempDir() + ", totalSize="
        + TraditionalBinaryPrefix.long2String(cr.getSize(), "", 1));

      return doCompaction(cr, filesToCompact, user, compactionStartTime,
        compaction.compact(throughputController, user));
    } finally {
      finishCompactionRequest(cr);
    }
  }

  protected List<HStoreFile> doCompaction(CompactionRequestImpl cr,
    Collection<HStoreFile> filesToCompact, User user, long compactionStartTime,
    List<Path> newFiles) throws IOException {
    // Do the steps necessary to complete the compaction.
    setStoragePolicyFromFileName(newFiles);
    List<HStoreFile> sfs = storeEngine.commitStoreFiles(newFiles, true);
    if (this.getCoprocessorHost() != null) {
      for (HStoreFile sf : sfs) {
        getCoprocessorHost().postCompact(this, sf, cr.getTracker(), cr, user);
      }
    }
    replaceStoreFiles(filesToCompact, sfs, true);

    long outputBytes = getTotalSize(sfs);

    // At this point the store will use new files for all new scanners.
    refreshStoreSizeAndTotalBytes(); // update store size.

    long now = EnvironmentEdgeManager.currentTime();
    if (
      region.getRegionServerServices() != null
        && region.getRegionServerServices().getMetrics() != null
    ) {
      region.getRegionServerServices().getMetrics().updateCompaction(
        region.getTableDescriptor().getTableName().getNameAsString(), cr.isMajor(),
        now - compactionStartTime, cr.getFiles().size(), newFiles.size(), cr.getSize(),
        outputBytes);

    }

    logCompactionEndMessage(cr, sfs, now, compactionStartTime);
    return sfs;
  }

  // Set correct storage policy from the file name of DTCP.
  // Rename file will not change the storage policy.
  private void setStoragePolicyFromFileName(List<Path> newFiles) throws IOException {
    String prefix = HConstants.STORAGE_POLICY_PREFIX;
    for (Path newFile : newFiles) {
      if (newFile.getParent().getName().startsWith(prefix)) {
        CommonFSUtils.setStoragePolicy(getRegionFileSystem().getFileSystem(), newFile,
          newFile.getParent().getName().substring(prefix.length()));
      }
    }
  }

  /**
   * Writes the compaction WAL record.
   * @param filesCompacted Files compacted (input).
   * @param newFiles       Files from compaction.
   */
  private void writeCompactionWalRecord(Collection<HStoreFile> filesCompacted,
    Collection<HStoreFile> newFiles) throws IOException {
    if (region.getWAL() == null) {
      return;
    }
    List<Path> inputPaths =
      filesCompacted.stream().map(HStoreFile::getPath).collect(Collectors.toList());
    List<Path> outputPaths =
      newFiles.stream().map(HStoreFile::getPath).collect(Collectors.toList());
    RegionInfo info = this.region.getRegionInfo();
    CompactionDescriptor compactionDescriptor = ProtobufUtil.toCompactionDescriptor(info,
      getColumnFamilyDescriptor().getName(), inputPaths, outputPaths,
      getRegionFileSystem().getStoreDir(getColumnFamilyDescriptor().getNameAsString()));
    // Fix reaching into Region to get the maxWaitForSeqId.
    // Does this method belong in Region altogether given it is making so many references up there?
    // Could be Region#writeCompactionMarker(compactionDescriptor);
    WALUtil.writeCompactionMarker(this.region.getWAL(), this.region.getReplicationScope(),
      this.region.getRegionInfo(), compactionDescriptor, this.region.getMVCC());
  }

  @RestrictedApi(explanation = "Should only be called in TestHStore", link = "",
      allowedOnPath = ".*/(HStore|TestHStore).java")
  void replaceStoreFiles(Collection<HStoreFile> compactedFiles, Collection<HStoreFile> result,
    boolean writeCompactionMarker) throws IOException {
    storeEngine.replaceStoreFiles(compactedFiles, result, () -> {
      if (writeCompactionMarker) {
        writeCompactionWalRecord(compactedFiles, result);
      }
    }, () -> {
      synchronized (filesCompacting) {
        filesCompacting.removeAll(compactedFiles);
      }
    });
    // These may be null when the RS is shutting down. The space quota Chores will fix the Region
    // sizes later so it's not super-critical if we miss these.
    RegionServerServices rsServices = region.getRegionServerServices();
    if (rsServices != null && rsServices.getRegionServerSpaceQuotaManager() != null) {
      updateSpaceQuotaAfterFileReplacement(
        rsServices.getRegionServerSpaceQuotaManager().getRegionSizeStore(), getRegionInfo(),
        compactedFiles, result);
    }
  }

  /**
   * Updates the space quota usage for this region, removing the size for files compacted away and
   * adding in the size for new files.
   * @param sizeStore  The object tracking changes in region size for space quotas.
   * @param regionInfo The identifier for the region whose size is being updated.
   * @param oldFiles   Files removed from this store's region.
   * @param newFiles   Files added to this store's region.
   */
  void updateSpaceQuotaAfterFileReplacement(RegionSizeStore sizeStore, RegionInfo regionInfo,
    Collection<HStoreFile> oldFiles, Collection<HStoreFile> newFiles) {
    long delta = 0;
    if (oldFiles != null) {
      for (HStoreFile compactedFile : oldFiles) {
        if (compactedFile.isHFile()) {
          delta -= compactedFile.getReader().length();
        }
      }
    }
    if (newFiles != null) {
      for (HStoreFile newFile : newFiles) {
        if (newFile.isHFile()) {
          delta += newFile.getReader().length();
        }
      }
    }
    sizeStore.incrementRegionSize(regionInfo, delta);
  }

  /**
   * Log a very elaborate compaction completion message.
   * @param cr                  Request.
   * @param sfs                 Resulting files.
   * @param compactionStartTime Start time.
1310 */ 1311 private void logCompactionEndMessage(CompactionRequestImpl cr, List<HStoreFile> sfs, long now, 1312 long compactionStartTime) { 1313 StringBuilder message = new StringBuilder("Completed" + (cr.isMajor() ? " major" : "") 1314 + " compaction of " + cr.getFiles().size() + (cr.isAllFiles() ? " (all)" : "") 1315 + " file(s) in " + this + " of " + this.getRegionInfo().getShortNameToLog() + " into "); 1316 if (sfs.isEmpty()) { 1317 message.append("none, "); 1318 } else { 1319 for (HStoreFile sf : sfs) { 1320 message.append(sf.getPath().getName()); 1321 message.append("(size="); 1322 message.append(TraditionalBinaryPrefix.long2String(sf.getReader().length(), "", 1)); 1323 message.append("), "); 1324 } 1325 } 1326 message.append("total size for store is ") 1327 .append(StringUtils.TraditionalBinaryPrefix.long2String(storeSize.get(), "", 1)) 1328 .append(". This selection was in queue for ") 1329 .append(StringUtils.formatTimeDiff(compactionStartTime, cr.getSelectionTime())) 1330 .append(", and took ").append(StringUtils.formatTimeDiff(now, compactionStartTime)) 1331 .append(" to execute."); 1332 LOG.info(message.toString()); 1333 if (LOG.isTraceEnabled()) { 1334 int fileCount = storeEngine.getStoreFileManager().getStorefileCount(); 1335 long resultSize = getTotalSize(sfs); 1336 String traceMessage = "COMPACTION start,end,size out,files in,files out,store size," 1337 + "store files [" + compactionStartTime + "," + now + "," + resultSize + "," 1338 + cr.getFiles().size() + "," + sfs.size() + "," + storeSize + "," + fileCount + "]"; 1339 LOG.trace(traceMessage); 1340 } 1341 } 1342 1343 /** 1344 * Call to complete a compaction. It's for the case where we find in the WAL a compaction that was 1345 * not finished. We could find one when recovering a WAL after a regionserver crash. See HBASE-2231. 1346 */ 1347 public void replayCompactionMarker(CompactionDescriptor compaction, boolean pickCompactionFiles, 1348 boolean removeFiles) throws IOException { 1349 LOG.debug("Completing compaction from the WAL marker"); 1350 List<String> compactionInputs = compaction.getCompactionInputList(); 1351 List<String> compactionOutputs = Lists.newArrayList(compaction.getCompactionOutputList()); 1352 1353 // The Compaction Marker is written after the compaction is completed, 1354 // and the files moved into the region/family folder. 1355 // 1356 // If we crash after the entry is written, we may not have removed the 1357 // input files, but the output file is present. 1358 // (The unremoved input files will be removed by this function) 1359 // 1360 // If we scan the directory and the file is not present, it can mean that: 1361 // - The file was manually removed by the user 1362 // - The file was removed as a consequence of a subsequent compaction 1363 // so, we can't do anything with the "compaction output list" because those 1364 // files have already been loaded when opening the region (by virtue of 1365 // being in the store's folder) or they may be missing due to a compaction.
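// Illustrative example (hypothetical file names): for a marker with inputs {f1, f2} and output {f3}, where f2 is already gone and f3 was already loaded when the region opened, the code below ends up replacing [f1] with [] (no new compaction marker is written) and then refreshing the store size.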
1366 1367 String familyName = this.getColumnFamilyName(); 1368 Set<String> inputFiles = new HashSet<>(); 1369 for (String compactionInput : compactionInputs) { 1370 Path inputPath = getRegionFileSystem().getStoreFilePath(familyName, compactionInput); 1371 inputFiles.add(inputPath.getName()); 1372 } 1373 1374 // some of the input files might already be deleted 1375 List<HStoreFile> inputStoreFiles = new ArrayList<>(compactionInputs.size()); 1376 for (HStoreFile sf : this.getStorefiles()) { 1377 if (inputFiles.contains(sf.getPath().getName())) { 1378 inputStoreFiles.add(sf); 1379 } 1380 } 1381 1382 // check whether we need to pick up the new files 1383 List<HStoreFile> outputStoreFiles = new ArrayList<>(compactionOutputs.size()); 1384 1385 if (pickCompactionFiles) { 1386 for (HStoreFile sf : this.getStorefiles()) { 1387 compactionOutputs.remove(sf.getPath().getName()); 1388 } 1389 for (String compactionOutput : compactionOutputs) { 1390 StoreFileInfo storeFileInfo = 1391 getRegionFileSystem().getStoreFileInfo(getColumnFamilyName(), compactionOutput); 1392 HStoreFile storeFile = storeEngine.createStoreFileAndReader(storeFileInfo); 1393 outputStoreFiles.add(storeFile); 1394 } 1395 } 1396 1397 if (!inputStoreFiles.isEmpty() || !outputStoreFiles.isEmpty()) { 1398 LOG.info("Replaying compaction marker, replacing input files: " + inputStoreFiles 1399 + " with output files : " + outputStoreFiles); 1400 this.replaceStoreFiles(inputStoreFiles, outputStoreFiles, false); 1401 this.refreshStoreSizeAndTotalBytes(); 1402 } 1403 } 1404 1405 @Override 1406 public boolean hasReferences() { 1407 // Grab the read lock here, because we need to ensure that: only when the atomic 1408 // replaceStoreFiles(..) finished, we can get all the complete store file list. 1409 this.storeEngine.readLock(); 1410 try { 1411 // Merge the current store files with compacted files here due to HBASE-20940. 1412 Collection<HStoreFile> allStoreFiles = new ArrayList<>(getStorefiles()); 1413 allStoreFiles.addAll(getCompactedFiles()); 1414 return StoreUtils.hasReferences(allStoreFiles); 1415 } finally { 1416 this.storeEngine.readUnlock(); 1417 } 1418 } 1419 1420 /** 1421 * getter for CompactionProgress object 1422 * @return CompactionProgress object; can be null 1423 */ 1424 public CompactionProgress getCompactionProgress() { 1425 return this.storeEngine.getCompactor().getProgress(); 1426 } 1427 1428 @Override 1429 public boolean shouldPerformMajorCompaction() throws IOException { 1430 for (HStoreFile sf : this.storeEngine.getStoreFileManager().getStorefiles()) { 1431 // TODO: what are these reader checks all over the place? 1432 if (sf.getReader() == null) { 1433 LOG.debug("StoreFile {} has null Reader", sf); 1434 return false; 1435 } 1436 } 1437 return storeEngine.getCompactionPolicy() 1438 .shouldPerformMajorCompaction(this.storeEngine.getStoreFileManager().getStorefiles()); 1439 } 1440 1441 public Optional<CompactionContext> requestCompaction() throws IOException { 1442 return requestCompaction(NO_PRIORITY, CompactionLifeCycleTracker.DUMMY, null); 1443 } 1444 1445 public Optional<CompactionContext> requestCompaction(int priority, 1446 CompactionLifeCycleTracker tracker, User user) throws IOException { 1447 // don't even select for compaction if writes are disabled 1448 if (!this.areWritesEnabled()) { 1449 return Optional.empty(); 1450 } 1451 // Before we do compaction, try to get rid of unneeded files to simplify things. 
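// (Expired files dropped by removeUnneededFiles() go through the same replaceStoreFiles(..) path as compaction outputs, just with an empty output list.)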
1452 removeUnneededFiles(); 1453 1454 final CompactionContext compaction = storeEngine.createCompaction(); 1455 CompactionRequestImpl request = null; 1456 this.storeEngine.readLock(); 1457 try { 1458 synchronized (filesCompacting) { 1459 // First, see if coprocessor would want to override selection. 1460 if (this.getCoprocessorHost() != null) { 1461 final List<HStoreFile> candidatesForCoproc = compaction.preSelect(this.filesCompacting); 1462 boolean override = 1463 getCoprocessorHost().preCompactSelection(this, candidatesForCoproc, tracker, user); 1464 if (override) { 1465 // Coprocessor is overriding normal file selection. 1466 compaction.forceSelect(new CompactionRequestImpl(candidatesForCoproc)); 1467 } 1468 } 1469 1470 // Normal case - coprocessor is not overriding file selection. 1471 if (!compaction.hasSelection()) { 1472 boolean isUserCompaction = priority == Store.PRIORITY_USER; 1473 boolean mayUseOffPeak = 1474 offPeakHours.isOffPeakHour() && offPeakCompactionTracker.compareAndSet(false, true); 1475 try { 1476 compaction.select(this.filesCompacting, isUserCompaction, mayUseOffPeak, 1477 forceMajor && filesCompacting.isEmpty()); 1478 } catch (IOException e) { 1479 if (mayUseOffPeak) { 1480 offPeakCompactionTracker.set(false); 1481 } 1482 throw e; 1483 } 1484 assert compaction.hasSelection(); 1485 if (mayUseOffPeak && !compaction.getRequest().isOffPeak()) { 1486 // Compaction policy doesn't want to take advantage of off-peak. 1487 offPeakCompactionTracker.set(false); 1488 } 1489 } 1490 if (this.getCoprocessorHost() != null) { 1491 this.getCoprocessorHost().postCompactSelection(this, 1492 ImmutableList.copyOf(compaction.getRequest().getFiles()), tracker, 1493 compaction.getRequest(), user); 1494 } 1495 // Finally, we have the resulting files list. Check if we have any files at all. 1496 request = compaction.getRequest(); 1497 Collection<HStoreFile> selectedFiles = request.getFiles(); 1498 if (selectedFiles.isEmpty()) { 1499 return Optional.empty(); 1500 } 1501 1502 addToCompactingFiles(selectedFiles); 1503 1504 // If we're enqueuing a major, clear the force flag. 1505 this.forceMajor = this.forceMajor && !request.isMajor(); 1506 1507 // Set common request properties. 1508 // Set priority, either override value supplied by caller or from store. 1509 final int compactionPriority = 1510 (priority != Store.NO_PRIORITY) ? priority : getCompactPriority(); 1511 request.setPriority(compactionPriority); 1512 1513 if (request.isAfterSplit()) { 1514 // If the store belongs to recently splitted daughter regions, better we consider 1515 // them with the higher priority in the compaction queue. 1516 // Override priority if it is lower (higher int value) than 1517 // SPLIT_REGION_COMPACTION_PRIORITY 1518 final int splitHousekeepingPriority = 1519 Math.min(compactionPriority, SPLIT_REGION_COMPACTION_PRIORITY); 1520 request.setPriority(splitHousekeepingPriority); 1521 LOG.info( 1522 "Keeping/Overriding Compaction request priority to {} for CF {} since it" 1523 + " belongs to recently split daughter region {}", 1524 splitHousekeepingPriority, this.getColumnFamilyName(), 1525 getRegionInfo().getRegionNameAsString()); 1526 } 1527 request.setDescription(getRegionInfo().getRegionNameAsString(), getColumnFamilyName()); 1528 request.setTracker(tracker); 1529 } 1530 } finally { 1531 this.storeEngine.readUnlock(); 1532 } 1533 1534 if (LOG.isDebugEnabled()) { 1535 LOG.debug(this + " is initiating " + (request.isMajor() ? "major" : "minor") + " compaction" 1536 + (request.isAllFiles() ? 
" (all files)" : "")); 1537 } 1538 this.region.reportCompactionRequestStart(request.isMajor()); 1539 return Optional.of(compaction); 1540 } 1541 1542 /** Adds the files to compacting files. filesCompacting must be locked. */ 1543 private void addToCompactingFiles(Collection<HStoreFile> filesToAdd) { 1544 if (CollectionUtils.isEmpty(filesToAdd)) { 1545 return; 1546 } 1547 // Check that we do not try to compact the same StoreFile twice. 1548 if (!Collections.disjoint(filesCompacting, filesToAdd)) { 1549 Preconditions.checkArgument(false, "%s overlaps with %s", filesToAdd, filesCompacting); 1550 } 1551 filesCompacting.addAll(filesToAdd); 1552 Collections.sort(filesCompacting, storeEngine.getStoreFileManager().getStoreFileComparator()); 1553 } 1554 1555 private void removeUnneededFiles() throws IOException { 1556 if (!conf.getBoolean("hbase.store.delete.expired.storefile", true)) { 1557 return; 1558 } 1559 if (getColumnFamilyDescriptor().getMinVersions() > 0) { 1560 LOG.debug("Skipping expired store file removal due to min version of {} being {}", this, 1561 getColumnFamilyDescriptor().getMinVersions()); 1562 return; 1563 } 1564 this.storeEngine.readLock(); 1565 Collection<HStoreFile> delSfs = null; 1566 try { 1567 synchronized (filesCompacting) { 1568 long cfTtl = getStoreFileTtl(); 1569 if (cfTtl != Long.MAX_VALUE) { 1570 delSfs = storeEngine.getStoreFileManager() 1571 .getUnneededFiles(EnvironmentEdgeManager.currentTime() - cfTtl, filesCompacting); 1572 addToCompactingFiles(delSfs); 1573 } 1574 } 1575 } finally { 1576 this.storeEngine.readUnlock(); 1577 } 1578 1579 if (CollectionUtils.isEmpty(delSfs)) { 1580 return; 1581 } 1582 1583 Collection<HStoreFile> newFiles = Collections.emptyList(); // No new files. 1584 replaceStoreFiles(delSfs, newFiles, true); 1585 refreshStoreSizeAndTotalBytes(); 1586 LOG.info("Completed removal of " + delSfs.size() + " unnecessary (expired) file(s) in " + this 1587 + "; total size is " + TraditionalBinaryPrefix.long2String(storeSize.get(), "", 1)); 1588 } 1589 1590 public void cancelRequestedCompaction(CompactionContext compaction) { 1591 finishCompactionRequest(compaction.getRequest()); 1592 } 1593 1594 protected void finishCompactionRequest(CompactionRequestImpl cr) { 1595 this.region.reportCompactionRequestEnd(cr.isMajor(), cr.getFiles().size(), cr.getSize()); 1596 if (cr.isOffPeak()) { 1597 offPeakCompactionTracker.set(false); 1598 cr.setOffPeak(false); 1599 } 1600 synchronized (filesCompacting) { 1601 filesCompacting.removeAll(cr.getFiles()); 1602 } 1603 // The tracker could be null, for example, we do not need to track the creation of store file 1604 // writer due to different implementation of SFT, or the compaction is canceled. 1605 if (cr.getWriterCreationTracker() != null) { 1606 storeFileWriterCreationTrackers.remove(cr.getWriterCreationTracker()); 1607 } 1608 } 1609 1610 /** 1611 * Update counts. 1612 */ 1613 protected void refreshStoreSizeAndTotalBytes() throws IOException { 1614 this.storeSize.set(0L); 1615 this.totalUncompressedBytes.set(0L); 1616 for (HStoreFile hsf : this.storeEngine.getStoreFileManager().getStorefiles()) { 1617 StoreFileReader r = hsf.getReader(); 1618 if (r == null) { 1619 LOG.debug("StoreFile {} has a null Reader", hsf); 1620 continue; 1621 } 1622 this.storeSize.addAndGet(r.length()); 1623 this.totalUncompressedBytes.addAndGet(r.getTotalUncompressedBytes()); 1624 } 1625 } 1626 1627 /* 1628 * @param wantedVersions How many versions were asked for. 1629 * @return wantedVersions or this families' {@link HConstants#VERSIONS}. 
1630 */ 1631 int versionsToReturn(final int wantedVersions) { 1632 if (wantedVersions <= 0) { 1633 throw new IllegalArgumentException("Number of versions must be > 0"); 1634 } 1635 // Make sure we do not return more than maximum versions for this store. 1636 int maxVersions = getColumnFamilyDescriptor().getMaxVersions(); 1637 return wantedVersions > maxVersions ? maxVersions : wantedVersions; 1638 } 1639 1640 @Override 1641 public boolean canSplit() { 1642 // Not split-able if we find a reference store file present in the store. 1643 boolean result = !hasReferences(); 1644 if (!result) { 1645 LOG.trace("Not splittable; has references: {}", this); 1646 } 1647 return result; 1648 } 1649 1650 /** 1651 * Determines if Store should be split. 1652 */ 1653 public Optional<byte[]> getSplitPoint() { 1654 this.storeEngine.readLock(); 1655 try { 1656 // Should already be enforced by the split policy! 1657 assert !this.getRegionInfo().isMetaRegion(); 1658 // Not split-able if we find a reference store file present in the store. 1659 if (hasReferences()) { 1660 LOG.trace("Not splittable; has references: {}", this); 1661 return Optional.empty(); 1662 } 1663 return this.storeEngine.getStoreFileManager().getSplitPoint(); 1664 } catch (IOException e) { 1665 LOG.warn("Failed getting store size for {}", this, e); 1666 } finally { 1667 this.storeEngine.readUnlock(); 1668 } 1669 return Optional.empty(); 1670 } 1671 1672 @Override 1673 public long getLastCompactSize() { 1674 return this.lastCompactSize; 1675 } 1676 1677 @Override 1678 public long getSize() { 1679 return storeSize.get(); 1680 } 1681 1682 public void triggerMajorCompaction() { 1683 this.forceMajor = true; 1684 } 1685 1686 ////////////////////////////////////////////////////////////////////////////// 1687 // File administration 1688 ////////////////////////////////////////////////////////////////////////////// 1689 1690 /** 1691 * Return a scanner for both the memstore and the HStore files. Assumes we are not in a 1692 * compaction. 1693 * @param scan Scan to apply when scanning the stores 1694 * @param targetCols columns to scan 1695 * @return a scanner over the current key values 1696 * @throws IOException on failure 1697 */ 1698 public KeyValueScanner getScanner(Scan scan, final NavigableSet<byte[]> targetCols, long readPt) 1699 throws IOException { 1700 storeEngine.readLock(); 1701 try { 1702 ScanInfo scanInfo; 1703 if (this.getCoprocessorHost() != null) { 1704 scanInfo = this.getCoprocessorHost().preStoreScannerOpen(this, scan); 1705 } else { 1706 scanInfo = getScanInfo(); 1707 } 1708 return createScanner(scan, scanInfo, targetCols, readPt); 1709 } finally { 1710 storeEngine.readUnlock(); 1711 } 1712 } 1713 1714 // HMobStore will override this method to return its own implementation. 1715 protected KeyValueScanner createScanner(Scan scan, ScanInfo scanInfo, 1716 NavigableSet<byte[]> targetCols, long readPt) throws IOException { 1717 return scan.isReversed() 1718 ? 
new ReversedStoreScanner(this, scanInfo, scan, targetCols, readPt) 1719 : new StoreScanner(this, scanInfo, scan, targetCols, readPt); 1720 } 1721 1722 /** 1723 * Recreates the scanners on the current list of active store file scanners 1724 * @param currentFileScanners the current set of active store file scanners 1725 * @param cacheBlocks cache the blocks or not 1726 * @param usePread use pread or not 1727 * @param isCompaction is the scanner for compaction 1728 * @param matcher the scan query matcher 1729 * @param startRow the scan's start row 1730 * @param includeStartRow should the scan include the start row 1731 * @param stopRow the scan's stop row 1732 * @param includeStopRow should the scan include the stop row 1733 * @param readPt the read point of the current scan 1734 * @param includeMemstoreScanner whether the current scanner should include a memstore scanner 1735 * @return list of scanners recreated over the store files backing the given scanners, or null if none need reopening 1736 */ 1737 public List<KeyValueScanner> recreateScanners(List<KeyValueScanner> currentFileScanners, 1738 boolean cacheBlocks, boolean usePread, boolean isCompaction, ScanQueryMatcher matcher, 1739 byte[] startRow, boolean includeStartRow, byte[] stopRow, boolean includeStopRow, long readPt, 1740 boolean includeMemstoreScanner) throws IOException { 1741 this.storeEngine.readLock(); 1742 try { 1743 Map<String, HStoreFile> name2File = 1744 new HashMap<>(getStorefilesCount() + getCompactedFilesCount()); 1745 for (HStoreFile file : getStorefiles()) { 1746 name2File.put(file.getFileInfo().getActiveFileName(), file); 1747 } 1748 Collection<HStoreFile> compactedFiles = getCompactedFiles(); 1749 for (HStoreFile file : IterableUtils.emptyIfNull(compactedFiles)) { 1750 name2File.put(file.getFileInfo().getActiveFileName(), file); 1751 } 1752 List<HStoreFile> filesToReopen = new ArrayList<>(); 1753 for (KeyValueScanner kvs : currentFileScanners) { 1754 assert kvs.isFileScanner(); 1755 if (kvs.peek() == null) { 1756 continue; 1757 } 1758 filesToReopen.add(name2File.get(kvs.getFilePath().getName())); 1759 } 1760 if (filesToReopen.isEmpty()) { 1761 return null; 1762 } 1763 return getScanners(filesToReopen, cacheBlocks, false, false, matcher, startRow, 1764 includeStartRow, stopRow, includeStopRow, readPt, false); 1765 } finally { 1766 this.storeEngine.readUnlock(); 1767 } 1768 } 1769 1770 @Override 1771 public String toString() { 1772 return this.getRegionInfo().getShortNameToLog() + "/" + this.getColumnFamilyName(); 1773 } 1774 1775 @Override 1776 public int getStorefilesCount() { 1777 return this.storeEngine.getStoreFileManager().getStorefileCount(); 1778 } 1779 1780 @Override 1781 public int getCompactedFilesCount() { 1782 return this.storeEngine.getStoreFileManager().getCompactedFilesCount(); 1783 } 1784 1785 private LongStream getStoreFileAgeStream() { 1786 return this.storeEngine.getStoreFileManager().getStorefiles().stream().filter(sf -> { 1787 if (sf.getReader() == null) { 1788 LOG.debug("StoreFile {} has a null Reader", sf); 1789 return false; 1790 } else { 1791 return true; 1792 } 1793 }).filter(HStoreFile::isHFile).mapToLong(sf -> sf.getFileInfo().getCreatedTimestamp()) 1794 .map(t -> EnvironmentEdgeManager.currentTime() - t); 1795 } 1796 1797 @Override 1798 public OptionalLong getMaxStoreFileAge() { 1799 return getStoreFileAgeStream().max(); 1800 } 1801 1802 @Override 1803 public OptionalLong getMinStoreFileAge() { 1804 return getStoreFileAgeStream().min(); 1805 } 1806 1807 @Override 1808 public OptionalDouble getAvgStoreFileAge() { 1809 return
getStoreFileAgeStream().average(); 1810 } 1811 1812 @Override 1813 public long getNumReferenceFiles() { 1814 return this.storeEngine.getStoreFileManager().getStorefiles().stream() 1815 .filter(HStoreFile::isReference).count(); 1816 } 1817 1818 @Override 1819 public long getNumHFiles() { 1820 return this.storeEngine.getStoreFileManager().getStorefiles().stream() 1821 .filter(HStoreFile::isHFile).count(); 1822 } 1823 1824 @Override 1825 public long getStoreSizeUncompressed() { 1826 return this.totalUncompressedBytes.get(); 1827 } 1828 1829 @Override 1830 public long getStorefilesSize() { 1831 // Include all StoreFiles 1832 return StoreUtils.getStorefilesSize(this.storeEngine.getStoreFileManager().getStorefiles(), 1833 sf -> true); 1834 } 1835 1836 @Override 1837 public long getHFilesSize() { 1838 // Include only StoreFiles which are HFiles 1839 return StoreUtils.getStorefilesSize(this.storeEngine.getStoreFileManager().getStorefiles(), 1840 HStoreFile::isHFile); 1841 } 1842 1843 private long getStorefilesFieldSize(ToLongFunction<StoreFileReader> f) { 1844 return this.storeEngine.getStoreFileManager().getStorefiles().stream() 1845 .mapToLong(file -> StoreUtils.getStorefileFieldSize(file, f)).sum(); 1846 } 1847 1848 @Override 1849 public long getStorefilesRootLevelIndexSize() { 1850 return getStorefilesFieldSize(StoreFileReader::indexSize); 1851 } 1852 1853 @Override 1854 public long getTotalStaticIndexSize() { 1855 return getStorefilesFieldSize(StoreFileReader::getUncompressedDataIndexSize); 1856 } 1857 1858 @Override 1859 public long getTotalStaticBloomSize() { 1860 return getStorefilesFieldSize(StoreFileReader::getTotalBloomSize); 1861 } 1862 1863 @Override 1864 public MemStoreSize getMemStoreSize() { 1865 return this.memstore.size(); 1866 } 1867 1868 @Override 1869 public int getCompactPriority() { 1870 int priority = this.storeEngine.getStoreFileManager().getStoreCompactionPriority(); 1871 if (priority == PRIORITY_USER) { 1872 LOG.warn("Compaction priority is USER despite there being no user compaction"); 1873 } 1874 return priority; 1875 } 1876 1877 public boolean throttleCompaction(long compactionSize) { 1878 return storeEngine.getCompactionPolicy().throttleCompaction(compactionSize); 1879 } 1880 1881 public HRegion getHRegion() { 1882 return this.region; 1883 } 1884 1885 public RegionCoprocessorHost getCoprocessorHost() { 1886 return this.region.getCoprocessorHost(); 1887 } 1888 1889 @Override 1890 public RegionInfo getRegionInfo() { 1891 return getRegionFileSystem().getRegionInfo(); 1892 } 1893 1894 @Override 1895 public boolean areWritesEnabled() { 1896 return this.region.areWritesEnabled(); 1897 } 1898 1899 @Override 1900 public long getSmallestReadPoint() { 1901 return this.region.getSmallestReadPoint(); 1902 } 1903 1904 /** 1905 * Adds or replaces the specified KeyValues. 1906 * <p> 1907 * For each KeyValue specified, if a cell with the same row, family, and qualifier exists in 1908 * MemStore, it will be replaced. Otherwise, it will just be inserted to MemStore. 1909 * <p> 1910 * This operation is atomic on each KeyValue (row/family/qualifier) but not necessarily atomic 1911 * across all of them. 
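 * @param cells the KeyValues to add or replace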
1912 * @param readpoint readpoint below which we can safely remove duplicate KVs 1913 */ 1914 public void upsert(Iterable<Cell> cells, long readpoint, MemStoreSizing memstoreSizing) { 1915 this.storeEngine.readLock(); 1916 try { 1917 this.memstore.upsert(cells, readpoint, memstoreSizing); 1918 } finally { 1919 this.storeEngine.readUnlock(); 1920 } 1921 } 1922 1923 public StoreFlushContext createFlushContext(long cacheFlushId, FlushLifeCycleTracker tracker) { 1924 return new StoreFlusherImpl(cacheFlushId, tracker); 1925 } 1926 1927 private final class StoreFlusherImpl implements StoreFlushContext { 1928 1929 private final FlushLifeCycleTracker tracker; 1930 private final StoreFileWriterCreationTracker writerCreationTracker; 1931 private final long cacheFlushSeqNum; 1932 private MemStoreSnapshot snapshot; 1933 private List<Path> tempFiles; 1934 private List<Path> committedFiles; 1935 private long cacheFlushCount; 1936 private long cacheFlushSize; 1937 private long outputFileSize; 1938 1939 private StoreFlusherImpl(long cacheFlushSeqNum, FlushLifeCycleTracker tracker) { 1940 this.cacheFlushSeqNum = cacheFlushSeqNum; 1941 this.tracker = tracker; 1942 this.writerCreationTracker = storeFileWriterCreationTrackerFactory.get(); 1943 } 1944 1945 /** 1946 * This is not thread safe. The caller should have a lock on the region or the store. If 1947 * necessary, the lock can be added with the patch provided in HBASE-10087 1948 */ 1949 @Override 1950 public MemStoreSize prepare() { 1951 // passing the current sequence number of the wal - to allow bookkeeping in the memstore 1952 this.snapshot = memstore.snapshot(); 1953 this.cacheFlushCount = snapshot.getCellsCount(); 1954 this.cacheFlushSize = snapshot.getDataSize(); 1955 committedFiles = new ArrayList<>(1); 1956 return snapshot.getMemStoreSize(); 1957 } 1958 1959 @Override 1960 public void flushCache(MonitoredTask status) throws IOException { 1961 RegionServerServices rsService = region.getRegionServerServices(); 1962 ThroughputController throughputController = 1963 rsService == null ? null : rsService.getFlushThroughputController(); 1964 // it could be null if we do not need to track the creation of store file writer due to 1965 // different SFT implementation. 
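// Registering the tracker before writing lets getStoreFilesBeingWritten() (and thus the BrokenStoreFileCleaner) see the files this flush is still writing.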
1966 if (writerCreationTracker != null) { 1967 HStore.this.storeFileWriterCreationTrackers.add(writerCreationTracker); 1968 } 1969 tempFiles = HStore.this.flushCache(cacheFlushSeqNum, snapshot, status, throughputController, 1970 tracker, writerCreationTracker); 1971 } 1972 1973 @Override 1974 public boolean commit(MonitoredTask status) throws IOException { 1975 try { 1976 if (CollectionUtils.isEmpty(this.tempFiles)) { 1977 return false; 1978 } 1979 status.setStatus("Flushing " + this + ": reopening flushed file"); 1980 List<HStoreFile> storeFiles = storeEngine.commitStoreFiles(tempFiles, false); 1981 for (HStoreFile sf : storeFiles) { 1982 StoreFileReader r = sf.getReader(); 1983 if (LOG.isInfoEnabled()) { 1984 LOG.info("Added {}, entries={}, sequenceid={}, filesize={}", sf, r.getEntries(), 1985 cacheFlushSeqNum, TraditionalBinaryPrefix.long2String(r.length(), "", 1)); 1986 } 1987 outputFileSize += r.length(); 1988 storeSize.addAndGet(r.length()); 1989 totalUncompressedBytes.addAndGet(r.getTotalUncompressedBytes()); 1990 committedFiles.add(sf.getPath()); 1991 } 1992 1993 flushedCellsCount.addAndGet(cacheFlushCount); 1994 flushedCellsSize.addAndGet(cacheFlushSize); 1995 flushedOutputFileSize.addAndGet(outputFileSize); 1996 // call coprocessor after we have done all the accounting above 1997 for (HStoreFile sf : storeFiles) { 1998 if (getCoprocessorHost() != null) { 1999 getCoprocessorHost().postFlush(HStore.this, sf, tracker); 2000 } 2001 } 2002 // Add new file to store files. Clear snapshot too while we have the Store write lock. 2003 return completeFlush(storeFiles, snapshot.getId()); 2004 } finally { 2005 if (writerCreationTracker != null) { 2006 HStore.this.storeFileWriterCreationTrackers.remove(writerCreationTracker); 2007 } 2008 } 2009 } 2010 2011 @Override 2012 public long getOutputFileSize() { 2013 return outputFileSize; 2014 } 2015 2016 @Override 2017 public List<Path> getCommittedFiles() { 2018 return committedFiles; 2019 } 2020 2021 /** 2022 * Similar to commit, but called in secondary region replicas for replaying the flush cache from 2023 * primary region. Adds the new files to the store, and drops the snapshot depending on 2024 * dropMemstoreSnapshot argument. 
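 * <p> * Example (hypothetical file name): a replica replaying a flush event for file {@code f1} would call {@code replayFlush(Collections.singletonList("f1"), true)}, which opens {@code f1}, adds it to the store, and drops the prepared memstore snapshot.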
2025 * @param fileNames names of the flushed files 2026 * @param dropMemstoreSnapshot whether to drop the prepared memstore snapshot 2027 */ 2028 @Override 2029 public void replayFlush(List<String> fileNames, boolean dropMemstoreSnapshot) 2030 throws IOException { 2031 List<HStoreFile> storeFiles = new ArrayList<>(fileNames.size()); 2032 for (String file : fileNames) { 2033 // open the file as a store file (hfile link, etc) 2034 StoreFileInfo storeFileInfo = 2035 getRegionFileSystem().getStoreFileInfo(getColumnFamilyName(), file); 2036 HStoreFile storeFile = storeEngine.createStoreFileAndReader(storeFileInfo); 2037 storeFiles.add(storeFile); 2038 HStore.this.storeSize.addAndGet(storeFile.getReader().length()); 2039 HStore.this.totalUncompressedBytes 2040 .addAndGet(storeFile.getReader().getTotalUncompressedBytes()); 2041 if (LOG.isInfoEnabled()) { 2042 LOG.info(this + " added " + storeFile + ", entries=" + storeFile.getReader().getEntries() 2043 + ", sequenceid=" + storeFile.getReader().getSequenceID() + ", filesize=" 2044 + TraditionalBinaryPrefix.long2String(storeFile.getReader().length(), "", 1)); 2045 } 2046 } 2047 2048 long snapshotId = -1; // -1 means do not drop 2049 if (dropMemstoreSnapshot && snapshot != null) { 2050 snapshotId = snapshot.getId(); 2051 } 2052 HStore.this.completeFlush(storeFiles, snapshotId); 2053 } 2054 2055 /** 2056 * Abort the snapshot preparation. Drops the snapshot if any. 2057 */ 2058 @Override 2059 public void abort() throws IOException { 2060 if (snapshot != null) { 2061 HStore.this.completeFlush(Collections.emptyList(), snapshot.getId()); 2062 } 2063 } 2064 } 2065 2066 @Override 2067 public boolean needsCompaction() { 2068 List<HStoreFile> filesCompactingClone = null; 2069 synchronized (filesCompacting) { 2070 filesCompactingClone = Lists.newArrayList(filesCompacting); 2071 } 2072 return this.storeEngine.needsCompaction(filesCompactingClone); 2073 } 2074 2075 /** 2076 * Used for tests. 2077 * @return cache configuration for this Store. 
2078 */ 2079 public CacheConfig getCacheConfig() { 2080 return storeContext.getCacheConf(); 2081 } 2082 2083 public static final long FIXED_OVERHEAD = ClassSize.estimateBase(HStore.class, false); 2084 2085 public static final long DEEP_OVERHEAD = ClassSize.align( 2086 FIXED_OVERHEAD + ClassSize.OBJECT + ClassSize.REENTRANT_LOCK + ClassSize.CONCURRENT_SKIPLISTMAP 2087 + ClassSize.CONCURRENT_SKIPLISTMAP_ENTRY + ClassSize.OBJECT + ScanInfo.FIXED_OVERHEAD); 2088 2089 @Override 2090 public long heapSize() { 2091 MemStoreSize memstoreSize = this.memstore.size(); 2092 return DEEP_OVERHEAD + memstoreSize.getHeapSize() + storeContext.heapSize(); 2093 } 2094 2095 @Override 2096 public CellComparator getComparator() { 2097 return storeContext.getComparator(); 2098 } 2099 2100 public ScanInfo getScanInfo() { 2101 return scanInfo; 2102 } 2103 2104 /** 2105 * Set scan info, used by test 2106 * @param scanInfo new scan info to use for test 2107 */ 2108 void setScanInfo(ScanInfo scanInfo) { 2109 this.scanInfo = scanInfo; 2110 } 2111 2112 @Override 2113 public boolean hasTooManyStoreFiles() { 2114 return getStorefilesCount() > this.blockingFileCount; 2115 } 2116 2117 @Override 2118 public long getFlushedCellsCount() { 2119 return flushedCellsCount.get(); 2120 } 2121 2122 @Override 2123 public long getFlushedCellsSize() { 2124 return flushedCellsSize.get(); 2125 } 2126 2127 @Override 2128 public long getFlushedOutputFileSize() { 2129 return flushedOutputFileSize.get(); 2130 } 2131 2132 @Override 2133 public long getCompactedCellsCount() { 2134 return compactedCellsCount.get(); 2135 } 2136 2137 @Override 2138 public long getCompactedCellsSize() { 2139 return compactedCellsSize.get(); 2140 } 2141 2142 @Override 2143 public long getMajorCompactedCellsCount() { 2144 return majorCompactedCellsCount.get(); 2145 } 2146 2147 @Override 2148 public long getMajorCompactedCellsSize() { 2149 return majorCompactedCellsSize.get(); 2150 } 2151 2152 public void updateCompactedMetrics(boolean isMajor, CompactionProgress progress) { 2153 if (isMajor) { 2154 majorCompactedCellsCount.addAndGet(progress.getTotalCompactingKVs()); 2155 majorCompactedCellsSize.addAndGet(progress.totalCompactedSize); 2156 } else { 2157 compactedCellsCount.addAndGet(progress.getTotalCompactingKVs()); 2158 compactedCellsSize.addAndGet(progress.totalCompactedSize); 2159 } 2160 } 2161 2162 /** 2163 * Returns the StoreEngine that is backing this concrete implementation of Store. 2164 * @return Returns the {@link StoreEngine} object used internally inside this HStore object. 
2165 */ 2166 public StoreEngine<?, ?, ?, ?> getStoreEngine() { 2167 return this.storeEngine; 2168 } 2169 2170 protected OffPeakHours getOffPeakHours() { 2171 return this.offPeakHours; 2172 } 2173 2174 @Override 2175 public void onConfigurationChange(Configuration conf) { 2176 Configuration storeConf = StoreUtils.createStoreConfiguration(conf, region.getTableDescriptor(), 2177 getColumnFamilyDescriptor()); 2178 this.conf = storeConf; 2179 this.storeEngine.compactionPolicy.setConf(storeConf); 2180 this.offPeakHours = OffPeakHours.getInstance(storeConf); 2181 } 2182 2183 /** 2184 * {@inheritDoc} 2185 */ 2186 @Override 2187 public void registerChildren(ConfigurationManager manager) { 2188 CacheConfig cacheConfig = this.storeContext.getCacheConf(); 2189 if (cacheConfig != null) { 2190 manager.registerObserver(cacheConfig); 2191 } 2192 } 2193 2194 /** 2195 * {@inheritDoc} 2196 */ 2197 @Override 2198 public void deregisterChildren(ConfigurationManager manager) { 2199 // No children to deregister 2200 } 2201 2202 @Override 2203 public double getCompactionPressure() { 2204 return storeEngine.getStoreFileManager().getCompactionPressure(); 2205 } 2206 2207 @Override 2208 public boolean isPrimaryReplicaStore() { 2209 return getRegionInfo().getReplicaId() == RegionInfo.DEFAULT_REPLICA_ID; 2210 } 2211 2212 /** 2213 * Sets the store up for a region level snapshot operation. 2214 * @see #postSnapshotOperation() 2215 */ 2216 public void preSnapshotOperation() { 2217 archiveLock.lock(); 2218 } 2219 2220 /** 2221 * Perform tasks needed after the completion of snapshot operation. 2222 * @see #preSnapshotOperation() 2223 */ 2224 public void postSnapshotOperation() { 2225 archiveLock.unlock(); 2226 } 2227 2228 /** 2229 * Closes and archives the compacted files under this store 2230 */ 2231 public synchronized void closeAndArchiveCompactedFiles() throws IOException { 2232 // ensure other threads do not attempt to archive the same files on close() 2233 archiveLock.lock(); 2234 try { 2235 storeEngine.readLock(); 2236 Collection<HStoreFile> copyCompactedfiles = null; 2237 try { 2238 Collection<HStoreFile> compactedfiles = 2239 this.getStoreEngine().getStoreFileManager().getCompactedfiles(); 2240 if (CollectionUtils.isNotEmpty(compactedfiles)) { 2241 // Do a copy under read lock 2242 copyCompactedfiles = new ArrayList<>(compactedfiles); 2243 } else { 2244 LOG.trace("No compacted files to archive"); 2245 } 2246 } finally { 2247 storeEngine.readUnlock(); 2248 } 2249 if (CollectionUtils.isNotEmpty(copyCompactedfiles)) { 2250 removeCompactedfiles(copyCompactedfiles, true); 2251 } 2252 } finally { 2253 archiveLock.unlock(); 2254 } 2255 } 2256 2257 /** 2258 * Archives and removes the compacted files 2259 * @param compactedfiles The compacted files in this store that are not active in reads 2260 * @param evictOnClose true if blocks should be evicted from the cache when an HFile reader is 2261 * closed, false if not 2262 */ 2263 private void removeCompactedfiles(Collection<HStoreFile> compactedfiles, boolean evictOnClose) 2264 throws IOException { 2265 final List<HStoreFile> filesToRemove = new ArrayList<>(compactedfiles.size()); 2266 final List<Long> storeFileSizes = new ArrayList<>(compactedfiles.size()); 2267 for (final HStoreFile file : compactedfiles) { 2268 synchronized (file) { 2269 try { 2270 StoreFileReader r = file.getReader(); 2271 if (r == null) { 2272 LOG.debug("The file {} was closed but still not archived", file); 2273 // HACK: Temporarily re-open the reader so we can get the size of the file. 
Ideally, 2274 // we should know the size of an HStoreFile without having to ask the HStoreFileReader 2275 // for that. 2276 long length = getStoreFileSize(file); 2277 filesToRemove.add(file); 2278 storeFileSizes.add(length); 2279 continue; 2280 } 2281 2282 if (file.isCompactedAway() && !file.isReferencedInReads()) { 2283 // Even if deleting fails we need not bother as any new scanners won't be 2284 // able to use the compacted file as the status is already compactedAway 2285 LOG.trace("Closing and archiving the file {}", file); 2286 // Copy the file size before closing the reader 2287 final long length = r.length(); 2288 r.close(evictOnClose); 2289 // Just close and return 2290 filesToRemove.add(file); 2291 // Only add the length if we successfully added the file to `filesToRemove` 2292 storeFileSizes.add(length); 2293 } else { 2294 LOG.info("Can't archive compacted file " + file.getPath() 2295 + " because of either isCompactedAway=" + file.isCompactedAway() 2296 + " or file has reference, isReferencedInReads=" + file.isReferencedInReads() 2297 + ", refCount=" + r.getRefCount() + ", skipping for now."); 2298 } 2299 } catch (Exception e) { 2300 LOG.error("Exception while trying to close the compacted store file {}", file.getPath(), 2301 e); 2302 } 2303 } 2304 } 2305 if (this.isPrimaryReplicaStore()) { 2306 // Only the primary region is allowed to move the file to archive. 2307 // The secondary region does not move the files to archive. Any active reads from 2308 // the secondary region will still work because the file as such has active readers on it. 2309 if (!filesToRemove.isEmpty()) { 2310 LOG.debug("Moving the files {} to archive", filesToRemove); 2311 // Only if this is successful it has to be removed 2312 try { 2313 getRegionFileSystem().removeStoreFiles(this.getColumnFamilyDescriptor().getNameAsString(), 2314 filesToRemove); 2315 } catch (FailedArchiveException fae) { 2316 // Even if archiving some files failed, we still need to clear out any of the 2317 // files which were successfully archived. Otherwise we will receive a 2318 // FileNotFoundException when we attempt to re-archive them in the next go around. 2319 Collection<Path> failedFiles = fae.getFailedFiles(); 2320 Iterator<HStoreFile> iter = filesToRemove.iterator(); 2321 Iterator<Long> sizeIter = storeFileSizes.iterator(); 2322 while (iter.hasNext()) { 2323 sizeIter.next(); 2324 if (failedFiles.contains(iter.next().getPath())) { 2325 iter.remove(); 2326 sizeIter.remove(); 2327 } 2328 } 2329 if (!filesToRemove.isEmpty()) { 2330 clearCompactedfiles(filesToRemove); 2331 } 2332 throw fae; 2333 } 2334 } 2335 } 2336 if (!filesToRemove.isEmpty()) { 2337 // Clear the compactedfiles from the store file manager 2338 clearCompactedfiles(filesToRemove); 2339 // Try to send report of this archival to the Master for updating quota usage faster 2340 reportArchivedFilesForQuota(filesToRemove, storeFileSizes); 2341 } 2342 } 2343 2344 /** 2345 * Computes the length of a store file without succumbing to any errors along the way. If an error 2346 * is encountered, the implementation returns {@code 0} instead of the actual size. 2347 * @param file The file to compute the size of. 2348 * @return The size in bytes of the provided {@code file}. 
2349 */ 2350 long getStoreFileSize(HStoreFile file) { 2351 long length = 0; 2352 try { 2353 file.initReader(); 2354 length = file.getReader().length(); 2355 } catch (IOException e) { 2356 LOG.trace("Failed to open reader when trying to compute store file size for {}, ignoring", 2357 file, e); 2358 } finally { 2359 try { 2360 file.closeStoreFile( 2361 file.getCacheConf() != null ? file.getCacheConf().shouldEvictOnClose() : true); 2362 } catch (IOException e) { 2363 LOG.trace("Failed to close reader after computing store file size for {}, ignoring", file, 2364 e); 2365 } 2366 } 2367 return length; 2368 } 2369 2370 public Long preFlushSeqIDEstimation() { 2371 return memstore.preFlushSeqIDEstimation(); 2372 } 2373 2374 @Override 2375 public boolean isSloppyMemStore() { 2376 return this.memstore.isSloppy(); 2377 } 2378 2379 private void clearCompactedfiles(List<HStoreFile> filesToRemove) throws IOException { 2380 LOG.trace("Clearing the compacted file {} from this store", filesToRemove); 2381 storeEngine.removeCompactedFiles(filesToRemove); 2382 } 2383 2384 @Override 2385 public int getCurrentParallelPutCount() { 2386 return currentParallelPutCount.get(); 2387 } 2388 2389 public int getStoreRefCount() { 2390 return this.storeEngine.getStoreFileManager().getStorefiles().stream() 2391 .filter(sf -> sf.getReader() != null).filter(HStoreFile::isHFile) 2392 .mapToInt(HStoreFile::getRefCount).sum(); 2393 } 2394 2395 /** Returns get maximum ref count of storeFile among all compacted HStore Files for the HStore */ 2396 public int getMaxCompactedStoreFileRefCount() { 2397 OptionalInt maxCompactedStoreFileRefCount = this.storeEngine.getStoreFileManager() 2398 .getCompactedfiles().stream().filter(sf -> sf.getReader() != null).filter(HStoreFile::isHFile) 2399 .mapToInt(HStoreFile::getRefCount).max(); 2400 return maxCompactedStoreFileRefCount.isPresent() ? maxCompactedStoreFileRefCount.getAsInt() : 0; 2401 } 2402 2403 void reportArchivedFilesForQuota(List<? 
extends StoreFile> archivedFiles, List<Long> fileSizes) { 2404 // Sanity check from the caller 2405 if (archivedFiles.size() != fileSizes.size()) { 2406 throw new RuntimeException("Coding error: should never see lists of varying size"); 2407 } 2408 RegionServerServices rss = this.region.getRegionServerServices(); 2409 if (rss == null) { 2410 return; 2411 } 2412 List<Entry<String, Long>> filesWithSizes = new ArrayList<>(archivedFiles.size()); 2413 Iterator<Long> fileSizeIter = fileSizes.iterator(); 2414 for (StoreFile storeFile : archivedFiles) { 2415 final long fileSize = fileSizeIter.next(); 2416 if (storeFile.isHFile() && fileSize != 0) { 2417 filesWithSizes.add(Maps.immutableEntry(storeFile.getPath().getName(), fileSize)); 2418 } 2419 } 2420 if (LOG.isTraceEnabled()) { 2421 LOG.trace("Files archived: " + archivedFiles + ", reporting the following to the Master: " 2422 + filesWithSizes); 2423 } 2424 boolean success = rss.reportFileArchivalForQuotas(getTableName(), filesWithSizes); 2425 if (!success) { 2426 LOG.warn("Failed to report archival of files: " + filesWithSizes); 2427 } 2428 } 2429 2430 @Override 2431 public long getMemstoreOnlyRowReadsCount() { 2432 return memstoreOnlyRowReadsCount.sum(); 2433 } 2434 2435 @Override 2436 public long getMixedRowReadsCount() { 2437 return mixedRowReadsCount.sum(); 2438 } 2439 2440 @Override 2441 public Configuration getReadOnlyConfiguration() { 2442 return new ReadOnlyConfiguration(this.conf); 2443 } 2444 2445 void updateMetricsStore(boolean memstoreRead) { 2446 if (memstoreRead) { 2447 memstoreOnlyRowReadsCount.increment(); 2448 } else { 2449 mixedRowReadsCount.increment(); 2450 } 2451 } 2452 2453 /** 2454 * Return the storefiles which are currently being written to. Mainly used by 2455 * {@link BrokenStoreFileCleaner} to prevent deleting these files, as they are not yet present in 2456 * the store file tracker (SFT). 2457 */ 2458 public Set<Path> getStoreFilesBeingWritten() { 2459 return storeFileWriterCreationTrackers.stream().flatMap(t -> t.get().stream()) 2460 .collect(Collectors.toSet()); 2461 } 2462 2463 @Override 2464 public long getBloomFilterRequestsCount() { 2465 return storeEngine.getBloomFilterMetrics().getRequestsCount(); 2466 } 2467 2468 @Override 2469 public long getBloomFilterNegativeResultsCount() { 2470 return storeEngine.getBloomFilterMetrics().getNegativeResultsCount(); 2471 } 2472 2473 @Override 2474 public long getBloomFilterEligibleRequestsCount() { 2475 return storeEngine.getBloomFilterMetrics().getEligibleRequestsCount(); 2476 } 2477}