001/* 002 * Licensed to the Apache Software Foundation (ASF) under one 003 * or more contributor license agreements. See the NOTICE file 004 * distributed with this work for additional information 005 * regarding copyright ownership. The ASF licenses this file 006 * to you under the Apache License, Version 2.0 (the 007 * "License"); you may not use this file except in compliance 008 * with the License. You may obtain a copy of the License at 009 * 010 * http://www.apache.org/licenses/LICENSE-2.0 011 * 012 * Unless required by applicable law or agreed to in writing, software 013 * distributed under the License is distributed on an "AS IS" BASIS, 014 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 015 * See the License for the specific language governing permissions and 016 * limitations under the License. 017 */ 018package org.apache.hadoop.hbase.io.hfile; 019 020import static org.apache.hadoop.hbase.io.ByteBuffAllocator.HEAP; 021import static org.junit.Assert.assertEquals; 022import static org.junit.Assert.assertNotNull; 023import static org.junit.Assert.assertNull; 024import static org.junit.Assert.assertTrue; 025 026import java.nio.ByteBuffer; 027import java.util.Random; 028import java.util.concurrent.ExecutorService; 029import java.util.concurrent.Executors; 030import java.util.concurrent.ThreadLocalRandom; 031import java.util.concurrent.TimeUnit; 032import java.util.concurrent.atomic.AtomicBoolean; 033import java.util.concurrent.atomic.AtomicInteger; 034import org.apache.hadoop.conf.Configuration; 035import org.apache.hadoop.hbase.HBaseClassTestRule; 036import org.apache.hadoop.hbase.HBaseConfiguration; 037import org.apache.hadoop.hbase.HConstants; 038import org.apache.hadoop.hbase.Waiter; 039import org.apache.hadoop.hbase.Waiter.ExplainingPredicate; 040import org.apache.hadoop.hbase.io.HeapSize; 041import org.apache.hadoop.hbase.io.hfile.LruAdaptiveBlockCache.EvictionThread; 042import org.apache.hadoop.hbase.nio.ByteBuff; 043import 
org.apache.hadoop.hbase.testclassification.IOTests; 044import org.apache.hadoop.hbase.testclassification.SmallTests; 045import org.apache.hadoop.hbase.util.ClassSize; 046import org.junit.Assert; 047import org.junit.ClassRule; 048import org.junit.Test; 049import org.junit.experimental.categories.Category; 050import org.slf4j.Logger; 051import org.slf4j.LoggerFactory; 052 053/** 054 * Tests the concurrent LruAdaptiveBlockCache. 055 * <p> 056 * Tests will ensure it grows and shrinks in size properly, evictions run when they're supposed to 057 * and do what they should, and that cached blocks are accessible when expected to be. 058 */ 059@Category({ IOTests.class, SmallTests.class }) 060public class TestLruAdaptiveBlockCache { 061 062 @ClassRule 063 public static final HBaseClassTestRule CLASS_RULE = 064 HBaseClassTestRule.forClass(TestLruAdaptiveBlockCache.class); 065 066 private static final Logger LOG = LoggerFactory.getLogger(TestLruAdaptiveBlockCache.class); 067 068 private static final Configuration CONF = HBaseConfiguration.create(); 069 070 @Test 071 public void testCacheEvictionThreadSafe() throws Exception { 072 long maxSize = 100000; 073 int numBlocks = 9; 074 int testRuns = 10; 075 final long blockSize = calculateBlockSizeDefault(maxSize, numBlocks); 076 assertTrue("calculateBlockSize appears broken.", blockSize * numBlocks <= maxSize); 077 078 final LruAdaptiveBlockCache cache = new LruAdaptiveBlockCache(maxSize, blockSize); 079 EvictionThread evictionThread = cache.getEvictionThread(); 080 assertNotNull(evictionThread); 081 Waiter.waitFor(CONF, 10000, 100, () -> evictionThread.isEnteringRun()); 082 final String hfileName = "hfile"; 083 int threads = 10; 084 final int blocksPerThread = 5 * numBlocks; 085 for (int run = 0; run != testRuns; ++run) { 086 final AtomicInteger blockCount = new AtomicInteger(0); 087 ExecutorService service = Executors.newFixedThreadPool(threads); 088 for (int i = 0; i != threads; ++i) { 089 service.execute(() -> { 090 for (int 
blockIndex = 0; blockIndex < blocksPerThread 091 || (!cache.isEvictionInProgress()); ++blockIndex) { 092 CachedItem block = 093 new CachedItem(hfileName, (int) blockSize, blockCount.getAndIncrement()); 094 boolean inMemory = Math.random() > 0.5; 095 cache.cacheBlock(block.cacheKey, block, inMemory); 096 } 097 cache.evictBlocksByHfileName(hfileName); 098 }); 099 } 100 service.shutdown(); 101 // The test may fail here if the evict thread frees the blocks too fast 102 service.awaitTermination(10, TimeUnit.MINUTES); 103 Waiter.waitFor(CONF, 10000, 100, new ExplainingPredicate<Exception>() { 104 @Override 105 public boolean evaluate() throws Exception { 106 return cache.getBlockCount() == 0; 107 } 108 109 @Override 110 public String explainFailure() throws Exception { 111 return "Cache block count failed to return to 0"; 112 } 113 }); 114 assertEquals(0, cache.getBlockCount()); 115 assertEquals(cache.getOverhead(), cache.getCurrentSize()); 116 } 117 } 118 119 @Test 120 public void testBackgroundEvictionThread() throws Exception { 121 long maxSize = 100000; 122 int numBlocks = 9; 123 long blockSize = calculateBlockSizeDefault(maxSize, numBlocks); 124 assertTrue("calculateBlockSize appears broken.", blockSize * numBlocks <= maxSize); 125 126 LruAdaptiveBlockCache cache = new LruAdaptiveBlockCache(maxSize, blockSize); 127 EvictionThread evictionThread = cache.getEvictionThread(); 128 assertNotNull(evictionThread); 129 130 CachedItem[] blocks = generateFixedBlocks(numBlocks + 1, blockSize, "block"); 131 132 // Make sure eviction thread has entered run method 133 Waiter.waitFor(CONF, 10000, 10, () -> evictionThread.isEnteringRun()); 134 135 // Add all the blocks 136 for (CachedItem block : blocks) { 137 cache.cacheBlock(block.cacheKey, block); 138 } 139 140 // wait until at least one eviction has run 141 Waiter.waitFor(CONF, 30000, 200, new ExplainingPredicate<Exception>() { 142 143 @Override 144 public boolean evaluate() throws Exception { 145 return 
cache.getStats().getEvictionCount() > 0; 146 } 147 148 @Override 149 public String explainFailure() throws Exception { 150 return "Eviction never happened."; 151 } 152 }); 153 154 // let cache stabilize 155 // On some systems, the cache will run multiple evictions before it attains 156 // steady-state. For instance, after populating the cache with 10 blocks, 157 // the first eviction evicts a single block and then a second eviction 158 // evicts another. I think this is due to the delta between minSize and 159 // acceptableSize, combined with variance between object overhead on 160 // different environments. 161 int n = 0; 162 for (long prevCnt = 0 /* < number of blocks added */, curCnt = cache.getBlockCount(); prevCnt 163 != curCnt; prevCnt = curCnt, curCnt = cache.getBlockCount()) { 164 Thread.sleep(200); 165 assertTrue("Cache never stabilized.", n++ < 100); 166 } 167 168 long evictionCount = cache.getStats().getEvictionCount(); 169 assertTrue(evictionCount >= 1); 170 LOG.info("Background Evictions run: {}", evictionCount); 171 } 172 173 @Test 174 public void testCacheSimple() throws Exception { 175 long maxSize = 1000000; 176 long blockSize = calculateBlockSizeDefault(maxSize, 101); 177 178 LruAdaptiveBlockCache cache = new LruAdaptiveBlockCache(maxSize, blockSize); 179 180 CachedItem[] blocks = generateRandomBlocks(100, blockSize); 181 182 long expectedCacheSize = cache.heapSize(); 183 184 // Confirm empty 185 for (CachedItem block : blocks) { 186 assertTrue(cache.getBlock(block.cacheKey, true, false, true) == null); 187 } 188 189 // Add blocks 190 for (CachedItem block : blocks) { 191 cache.cacheBlock(block.cacheKey, block); 192 expectedCacheSize += block.cacheBlockHeapSize(); 193 } 194 195 // Verify correctly calculated cache heap size 196 assertEquals(expectedCacheSize, cache.heapSize()); 197 198 // Check if all blocks are properly cached and retrieved 199 for (CachedItem block : blocks) { 200 HeapSize buf = cache.getBlock(block.cacheKey, true, false, true); 
201 assertTrue(buf != null); 202 assertEquals(buf.heapSize(), block.heapSize()); 203 } 204 205 // Re-add same blocks and ensure nothing has changed 206 long expectedBlockCount = cache.getBlockCount(); 207 for (CachedItem block : blocks) { 208 cache.cacheBlock(block.cacheKey, block); 209 } 210 assertEquals("Cache should ignore cache requests for blocks already in cache", 211 expectedBlockCount, cache.getBlockCount()); 212 213 // Verify correctly calculated cache heap size 214 assertEquals(expectedCacheSize, cache.heapSize()); 215 216 // Check if all blocks are properly cached and retrieved 217 for (CachedItem block : blocks) { 218 HeapSize buf = cache.getBlock(block.cacheKey, true, false, true); 219 assertTrue(buf != null); 220 assertEquals(buf.heapSize(), block.heapSize()); 221 } 222 223 CacheTestUtils.testConvertToJSON(cache); 224 225 // Expect no evictions 226 assertEquals(0, cache.getStats().getEvictionCount()); 227 Thread t = new LruAdaptiveBlockCache.StatisticsThread(cache); 228 t.start(); 229 t.join(); 230 } 231 232 @Test 233 public void testCacheEvictionSimple() throws Exception { 234 long maxSize = 100000; 235 long blockSize = calculateBlockSizeDefault(maxSize, 10); 236 237 LruAdaptiveBlockCache cache = new LruAdaptiveBlockCache(maxSize, blockSize, false); 238 239 CachedItem[] blocks = generateFixedBlocks(10, blockSize, "block"); 240 241 long expectedCacheSize = cache.heapSize(); 242 243 // Add all the blocks 244 for (CachedItem block : blocks) { 245 cache.cacheBlock(block.cacheKey, block); 246 expectedCacheSize += block.cacheBlockHeapSize(); 247 } 248 249 // A single eviction run should have occurred 250 assertEquals(1, cache.getStats().getEvictionCount()); 251 252 // Our expected size overruns acceptable limit 253 assertTrue(expectedCacheSize > (maxSize * LruAdaptiveBlockCache.DEFAULT_ACCEPTABLE_FACTOR)); 254 255 // But the cache did not grow beyond max 256 assertTrue(cache.heapSize() < maxSize); 257 258 // And is still below the acceptable limit 259 
    assertTrue(cache.heapSize() < (maxSize * LruAdaptiveBlockCache.DEFAULT_ACCEPTABLE_FACTOR));

    // All blocks except block 0 should be in the cache
    assertTrue(cache.getBlock(blocks[0].cacheKey, true, false, true) == null);
    for (int i = 1; i < blocks.length; i++) {
      assertEquals(cache.getBlock(blocks[i].cacheKey, true, false, true), blocks[i]);
    }
  }

  /**
   * Exercises the single/multi priority split: blocks that were cached and then read again become
   * "multi" priority, blocks cached but never read stay "single". An eviction should take from
   * both buckets rather than draining only one.
   */
  @Test
  public void testCacheEvictionTwoPriorities() throws Exception {
    long maxSize = 100000;
    long blockSize = calculateBlockSizeDefault(maxSize, 10);

    LruAdaptiveBlockCache cache = new LruAdaptiveBlockCache(maxSize, blockSize, false);

    CachedItem[] singleBlocks = generateFixedBlocks(5, 10000, "single");
    CachedItem[] multiBlocks = generateFixedBlocks(5, 10000, "multi");

    long expectedCacheSize = cache.heapSize();

    // Add and get the multi blocks (the getBlock promotes them to multi priority)
    for (CachedItem block : multiBlocks) {
      cache.cacheBlock(block.cacheKey, block);
      expectedCacheSize += block.cacheBlockHeapSize();
      assertEquals(cache.getBlock(block.cacheKey, true, false, true), block);
    }

    // Add the single blocks (no get)
    for (CachedItem block : singleBlocks) {
      cache.cacheBlock(block.cacheKey, block);
      expectedCacheSize += block.heapSize();
    }

    // A single eviction run should have occurred
    assertEquals(1, cache.getStats().getEvictionCount());

    // We expect two entries evicted
    assertEquals(2, cache.getStats().getEvictedCount());

    // Our expected size overruns acceptable limit
    assertTrue(expectedCacheSize > (maxSize * LruAdaptiveBlockCache.DEFAULT_ACCEPTABLE_FACTOR));

    // But the cache did not grow beyond max
    assertTrue(cache.heapSize() <= maxSize);

    // And is now below the acceptable limit
    assertTrue(cache.heapSize() <= (maxSize * LruAdaptiveBlockCache.DEFAULT_ACCEPTABLE_FACTOR));

    // We expect fairness across the two priorities.
    // This test makes multi go barely over its limit, in-memory
    // empty, and the rest in single. Two single evictions and
    // one multi eviction expected.
    assertTrue(cache.getBlock(singleBlocks[0].cacheKey, true, false, true) == null);
    assertTrue(cache.getBlock(multiBlocks[0].cacheKey, true, false, true) == null);

    // And all others to be cached
    for (int i = 1; i < 4; i++) {
      assertEquals(cache.getBlock(singleBlocks[i].cacheKey, true, false, true), singleBlocks[i]);
      assertEquals(cache.getBlock(multiBlocks[i].cacheKey, true, false, true), multiBlocks[i]);
    }
  }

  /**
   * Exercises all three priority buckets (single, multi, in-memory) with near-equal bucket quotas
   * (0.33/0.33/0.34) and asserts exactly which block is evicted after each insertion. The high
   * min/acceptable factors (0.98/0.99) make every insertion into a full cache evict exactly one
   * same-priority block.
   */
  @Test
  public void testCacheEvictionThreePriorities() throws Exception {
    long maxSize = 100000;
    long blockSize = calculateBlockSize(maxSize, 10);

    LruAdaptiveBlockCache cache = new LruAdaptiveBlockCache(maxSize, blockSize, false,
      (int) Math.ceil(1.2 * maxSize / blockSize), LruAdaptiveBlockCache.DEFAULT_LOAD_FACTOR,
      LruAdaptiveBlockCache.DEFAULT_CONCURRENCY_LEVEL, 0.98f, // min
      0.99f, // acceptable
      0.33f, // single
      0.33f, // multi
      0.34f, // memory
      1.2f, // limit
      false, 16 * 1024 * 1024, 10, 500, 0.01f);

    CachedItem[] singleBlocks = generateFixedBlocks(5, blockSize, "single");
    CachedItem[] multiBlocks = generateFixedBlocks(5, blockSize, "multi");
    CachedItem[] memoryBlocks = generateFixedBlocks(5, blockSize, "memory");

    long expectedCacheSize = cache.heapSize();

    // Add 3 blocks from each priority
    for (int i = 0; i < 3; i++) {

      // Just add single blocks
      cache.cacheBlock(singleBlocks[i].cacheKey, singleBlocks[i]);
      expectedCacheSize += singleBlocks[i].cacheBlockHeapSize();

      // Add and get multi blocks
      cache.cacheBlock(multiBlocks[i].cacheKey, multiBlocks[i]);
      expectedCacheSize += multiBlocks[i].cacheBlockHeapSize();
      cache.getBlock(multiBlocks[i].cacheKey, true, false, true);

      // Add memory blocks as such
      cache.cacheBlock(memoryBlocks[i].cacheKey, memoryBlocks[i], true);
      expectedCacheSize += memoryBlocks[i].cacheBlockHeapSize();

    }

    // Do not expect any evictions yet
    assertEquals(0, cache.getStats().getEvictionCount());

    // Verify cache size
    assertEquals(expectedCacheSize, cache.heapSize());

    // Insert a single block, oldest single should be evicted
    cache.cacheBlock(singleBlocks[3].cacheKey, singleBlocks[3]);

    // Single eviction, one thing evicted
    assertEquals(1, cache.getStats().getEvictionCount());
    assertEquals(1, cache.getStats().getEvictedCount());

    // Verify oldest single block is the one evicted
    assertEquals(null, cache.getBlock(singleBlocks[0].cacheKey, true, false, true));

    // Change the oldest remaining single block to a multi
    cache.getBlock(singleBlocks[1].cacheKey, true, false, true);

    // Insert another single block
    cache.cacheBlock(singleBlocks[4].cacheKey, singleBlocks[4]);

    // Two evictions, two evicted.
    assertEquals(2, cache.getStats().getEvictionCount());
    assertEquals(2, cache.getStats().getEvictedCount());

    // Oldest multi block should be evicted now
    assertEquals(null, cache.getBlock(multiBlocks[0].cacheKey, true, false, true));

    // Insert another memory block
    cache.cacheBlock(memoryBlocks[3].cacheKey, memoryBlocks[3], true);

    // Three evictions, three evicted.
    assertEquals(3, cache.getStats().getEvictionCount());
    assertEquals(3, cache.getStats().getEvictedCount());

    // Oldest memory block should be evicted now
    assertEquals(null, cache.getBlock(memoryBlocks[0].cacheKey, true, false, true));

    // Add a block that is twice as big (should force two evictions)
    CachedItem[] bigBlocks = generateFixedBlocks(3, blockSize * 3, "big");
    cache.cacheBlock(bigBlocks[0].cacheKey, bigBlocks[0]);

    // Four evictions, six evicted (inserted block 3X size, expect +3 evicted)
    assertEquals(4, cache.getStats().getEvictionCount());
    assertEquals(6, cache.getStats().getEvictedCount());

    // Expect three remaining singles to be evicted
    assertEquals(null, cache.getBlock(singleBlocks[2].cacheKey, true, false, true));
    assertEquals(null, cache.getBlock(singleBlocks[3].cacheKey, true, false, true));
    assertEquals(null, cache.getBlock(singleBlocks[4].cacheKey, true, false, true));

    // Make the big block a multi block
    cache.getBlock(bigBlocks[0].cacheKey, true, false, true);

    // Cache another single big block
    cache.cacheBlock(bigBlocks[1].cacheKey, bigBlocks[1]);

    // Five evictions, nine evicted (3 new)
    assertEquals(5, cache.getStats().getEvictionCount());
    assertEquals(9, cache.getStats().getEvictedCount());

    // Expect three remaining multis to be evicted
    // (singleBlocks[1] counts as a multi here: the getBlock above promoted it)
    assertEquals(null, cache.getBlock(singleBlocks[1].cacheKey, true, false, true));
    assertEquals(null, cache.getBlock(multiBlocks[1].cacheKey, true, false, true));
    assertEquals(null, cache.getBlock(multiBlocks[2].cacheKey, true, false, true));

    // Cache a big memory block
    cache.cacheBlock(bigBlocks[2].cacheKey, bigBlocks[2], true);

    // Six evictions, twelve evicted (3 new)
    assertEquals(6, cache.getStats().getEvictionCount());
    assertEquals(12, cache.getStats().getEvictedCount());

    // Expect three remaining in-memory to be evicted
    assertEquals(null,
      cache.getBlock(memoryBlocks[1].cacheKey, true, false, true));
    assertEquals(null, cache.getBlock(memoryBlocks[2].cacheKey, true, false, true));
    assertEquals(null, cache.getBlock(memoryBlocks[3].cacheKey, true, false, true));
  }

  /**
   * Exercises forceInMemory mode (last boolean ctor arg = true): in-memory blocks may take the
   * whole memory quota (0.5) and are never evicted in favor of single/multi blocks. The comments
   * below track the bucket occupancy as si:mu:me (single:multi:memory counts).
   */
  @Test
  public void testCacheEvictionInMemoryForceMode() throws Exception {
    long maxSize = 100000;
    long blockSize = calculateBlockSize(maxSize, 10);

    LruAdaptiveBlockCache cache = new LruAdaptiveBlockCache(maxSize, blockSize, false,
      (int) Math.ceil(1.2 * maxSize / blockSize), LruAdaptiveBlockCache.DEFAULT_LOAD_FACTOR,
      LruAdaptiveBlockCache.DEFAULT_CONCURRENCY_LEVEL, 0.98f, // min
      0.99f, // acceptable
      0.2f, // single
      0.3f, // multi
      0.5f, // memory
      1.2f, // limit
      true, 16 * 1024 * 1024, 10, 500, 0.01f);

    CachedItem[] singleBlocks = generateFixedBlocks(10, blockSize, "single");
    CachedItem[] multiBlocks = generateFixedBlocks(10, blockSize, "multi");
    CachedItem[] memoryBlocks = generateFixedBlocks(10, blockSize, "memory");

    long expectedCacheSize = cache.heapSize();

    // 0. Add 5 single blocks and 4 multi blocks to make cache full, si:mu:me = 5:4:0
    for (int i = 0; i < 4; i++) {
      // Just add single blocks
      cache.cacheBlock(singleBlocks[i].cacheKey, singleBlocks[i]);
      expectedCacheSize += singleBlocks[i].cacheBlockHeapSize();
      // Add and get multi blocks
      cache.cacheBlock(multiBlocks[i].cacheKey, multiBlocks[i]);
      expectedCacheSize += multiBlocks[i].cacheBlockHeapSize();
      cache.getBlock(multiBlocks[i].cacheKey, true, false, true);
    }
    // 5th single block
    cache.cacheBlock(singleBlocks[4].cacheKey, singleBlocks[4]);
    expectedCacheSize += singleBlocks[4].cacheBlockHeapSize();
    // Do not expect any evictions yet
    assertEquals(0, cache.getStats().getEvictionCount());
    // Verify cache size
    assertEquals(expectedCacheSize, cache.heapSize());

    // 1. Insert a memory block, oldest single should be evicted, si:mu:me = 4:4:1
    cache.cacheBlock(memoryBlocks[0].cacheKey, memoryBlocks[0], true);
    // Single eviction, one block evicted
    assertEquals(1, cache.getStats().getEvictionCount());
    assertEquals(1, cache.getStats().getEvictedCount());
    // Verify oldest single block (index = 0) is the one evicted
    assertEquals(null, cache.getBlock(singleBlocks[0].cacheKey, true, false, true));

    // 2. Insert another memory block, another single evicted, si:mu:me = 3:4:2
    cache.cacheBlock(memoryBlocks[1].cacheKey, memoryBlocks[1], true);
    // Two evictions, two evicted.
    assertEquals(2, cache.getStats().getEvictionCount());
    assertEquals(2, cache.getStats().getEvictedCount());
    // Current oldest single block (index = 1) should be evicted now
    assertEquals(null, cache.getBlock(singleBlocks[1].cacheKey, true, false, true));

    // 3. Insert 4 memory blocks, 2 single and 2 multi evicted, si:mu:me = 1:2:6
    cache.cacheBlock(memoryBlocks[2].cacheKey, memoryBlocks[2], true);
    cache.cacheBlock(memoryBlocks[3].cacheKey, memoryBlocks[3], true);
    cache.cacheBlock(memoryBlocks[4].cacheKey, memoryBlocks[4], true);
    cache.cacheBlock(memoryBlocks[5].cacheKey, memoryBlocks[5], true);
    // Six evictions total now, six evicted total (one per insertion).
    assertEquals(6, cache.getStats().getEvictionCount());
    assertEquals(6, cache.getStats().getEvictedCount());
    // two oldest single blocks and two oldest multi blocks evicted
    assertEquals(null, cache.getBlock(singleBlocks[2].cacheKey, true, false, true));
    assertEquals(null, cache.getBlock(singleBlocks[3].cacheKey, true, false, true));
    assertEquals(null, cache.getBlock(multiBlocks[0].cacheKey, true, false, true));
    assertEquals(null, cache.getBlock(multiBlocks[1].cacheKey, true, false, true));

    // 4. Insert 3 memory blocks, the remaining 1 single and 2 multi evicted
    // si:mu:me = 0:0:9
    cache.cacheBlock(memoryBlocks[6].cacheKey, memoryBlocks[6], true);
    cache.cacheBlock(memoryBlocks[7].cacheKey, memoryBlocks[7], true);
    cache.cacheBlock(memoryBlocks[8].cacheKey, memoryBlocks[8], true);
    // Nine evictions total now, nine evicted total.
    assertEquals(9, cache.getStats().getEvictionCount());
    assertEquals(9, cache.getStats().getEvictedCount());
    // one oldest single block and two oldest multi blocks evicted
    assertEquals(null, cache.getBlock(singleBlocks[4].cacheKey, true, false, true));
    assertEquals(null, cache.getBlock(multiBlocks[2].cacheKey, true, false, true));
    assertEquals(null, cache.getBlock(multiBlocks[3].cacheKey, true, false, true));

    // 5. Insert one memory block, the oldest memory evicted
    // si:mu:me = 0:0:9
    cache.cacheBlock(memoryBlocks[9].cacheKey, memoryBlocks[9], true);
    // one eviction, one evicted.
    assertEquals(11, cache.getStats().getEvictionCount());
    assertEquals(11, cache.getStats().getEvictedCount());
    // the single block just cached now evicted (can't evict memory)
    assertEquals(null, cache.getBlock(singleBlocks[9].cacheKey, true, false, true));
  }

  // test scan resistance: a long sequential read ("scan") of never-reaccessed blocks
  // should evict mostly from the single-access bucket and leave multi blocks cached.
  @Test
  public void testScanResistance() throws Exception {

    long maxSize = 100000;
    long blockSize = calculateBlockSize(maxSize, 10);

    LruAdaptiveBlockCache cache = new LruAdaptiveBlockCache(maxSize, blockSize, false,
      (int) Math.ceil(1.2 * maxSize / blockSize), LruAdaptiveBlockCache.DEFAULT_LOAD_FACTOR,
      LruAdaptiveBlockCache.DEFAULT_CONCURRENCY_LEVEL, 0.66f, // min
      0.99f, // acceptable
      0.33f, // single
      0.33f, // multi
      0.34f, // memory
      1.2f, // limit
      false, 16 * 1024 * 1024, 10, 500, 0.01f);

    CachedItem[] singleBlocks = generateFixedBlocks(20, blockSize, "single");
    CachedItem[] multiBlocks = generateFixedBlocks(5, blockSize, "multi");

    // Add 5 multi blocks
    for (CachedItem block : multiBlocks) {
      cache.cacheBlock(block.cacheKey, block);
      cache.getBlock(block.cacheKey, true, false, true);
    }

    // Add 5 single blocks
    for (int i = 0; i < 5; i++) {
      cache.cacheBlock(singleBlocks[i].cacheKey, singleBlocks[i]);
    }

    // An eviction ran
    assertEquals(1, cache.getStats().getEvictionCount());

    // To drop down to 2/3 capacity, we'll need to evict 4 blocks
    assertEquals(4, cache.getStats().getEvictedCount());

    // Should have been taken off equally from single and multi
    assertEquals(null, cache.getBlock(singleBlocks[0].cacheKey, true, false, true));
    assertEquals(null, cache.getBlock(singleBlocks[1].cacheKey, true, false, true));
    assertEquals(null, cache.getBlock(multiBlocks[0].cacheKey, true, false, true));
    assertEquals(null, cache.getBlock(multiBlocks[1].cacheKey, true, false, true));

    // Let's keep "scanning" by adding single blocks. From here on we only
    // expect evictions from the single bucket.

    // Every time we reach 10 total blocks (every 4 inserts) we get 4 single
    // blocks evicted. Inserting 13 blocks should yield 3 more evictions and
    // 12 more evicted.

    for (int i = 5; i < 18; i++) {
      cache.cacheBlock(singleBlocks[i].cacheKey, singleBlocks[i]);
    }

    // 4 total evictions, 16 total evicted
    assertEquals(4, cache.getStats().getEvictionCount());
    assertEquals(16, cache.getStats().getEvictedCount());

    // Should now have 7 total blocks
    assertEquals(7, cache.getBlockCount());

  }

  // Blocks larger than the configured max block size (1024 here) must be rejected
  // (counted as failed inserts) while normal-sized blocks cache fine.
  @Test
  public void testMaxBlockSize() throws Exception {
    long maxSize = 100000;
    long blockSize = calculateBlockSize(maxSize, 10);

    LruAdaptiveBlockCache cache = new LruAdaptiveBlockCache(maxSize, blockSize, false,
      (int) Math.ceil(1.2 * maxSize / blockSize), LruAdaptiveBlockCache.DEFAULT_LOAD_FACTOR,
      LruAdaptiveBlockCache.DEFAULT_CONCURRENCY_LEVEL, 0.66f, // min
      0.99f, // acceptable
      0.33f, // single
      0.33f, // multi
      0.34f, // memory
      1.2f, // limit
      false, 1024, 10, 500, 0.01f);

    CachedItem[] tooLong = generateFixedBlocks(10, 1024 + 5, "long");
    CachedItem[] small = generateFixedBlocks(15, 600, "small");

    for (CachedItem i : tooLong) {
      cache.cacheBlock(i.cacheKey, i);
    }
    for (CachedItem i : small) {
      cache.cacheBlock(i.cacheKey, i);
    }
    assertEquals(15, cache.getBlockCount());
    for (CachedItem i : small) {
      assertNotNull(cache.getBlock(i.cacheKey, true, false, false));
    }
    for (CachedItem i : tooLong) {
      assertNull(cache.getBlock(i.cacheKey, true, false, false));
    }

    assertEquals(10, cache.getStats().getFailedInserts());
  }

  // test setMaxSize: shrinking a full cache should evict enough blocks, fairly
  // across all three priority buckets, to fit the new maximum.
  @Test
  public void testResizeBlockCache() throws Exception {
    long maxSize = 300000;
    long blockSize = calculateBlockSize(maxSize, 31);

    LruAdaptiveBlockCache cache = new LruAdaptiveBlockCache(maxSize, blockSize, false,
      (int) Math.ceil(1.2 * maxSize / blockSize), LruAdaptiveBlockCache.DEFAULT_LOAD_FACTOR,
      LruAdaptiveBlockCache.DEFAULT_CONCURRENCY_LEVEL, 0.98f, // min
      0.99f, // acceptable
      0.33f, // single
      0.33f, // multi
      0.34f, // memory
      1.2f, // limit
      false, 16 * 1024 * 1024, 10, 500, 0.01f);

    CachedItem[] singleBlocks = generateFixedBlocks(10, blockSize, "single");
    CachedItem[] multiBlocks = generateFixedBlocks(10, blockSize, "multi");
    CachedItem[] memoryBlocks = generateFixedBlocks(10, blockSize, "memory");

    // Add all blocks from all priorities
    for (int i = 0; i < 10; i++) {
      // Just add single blocks
      cache.cacheBlock(singleBlocks[i].cacheKey, singleBlocks[i]);

      // Add and get multi blocks
      cache.cacheBlock(multiBlocks[i].cacheKey, multiBlocks[i]);
      cache.getBlock(multiBlocks[i].cacheKey, true, false, true);

      // Add memory blocks as such
      cache.cacheBlock(memoryBlocks[i].cacheKey, memoryBlocks[i], true);
    }

    // Do not expect any evictions yet
    assertEquals(0, cache.getStats().getEvictionCount());

    // Resize to half capacity plus an extra block (otherwise we evict an extra)
    cache.setMaxSize((long) (maxSize * 0.5f));

    // Should have run a single eviction
    assertEquals(1, cache.getStats().getEvictionCount());

    // And we expect 1/2 of the blocks to be evicted
    assertEquals(15, cache.getStats().getEvictedCount());

    // And the oldest 5 blocks from each category should be gone
    for (int i = 0; i < 5; i++) {
      assertEquals(null, cache.getBlock(singleBlocks[i].cacheKey, true, false, true));
      assertEquals(null, cache.getBlock(multiBlocks[i].cacheKey, true, false, true));
      assertEquals(null, cache.getBlock(memoryBlocks[i].cacheKey, true, false, true));
    }

    // And the newest 5 blocks should still be accessible
    for (int i = 5; i < 10; i++) {
      assertEquals(singleBlocks[i], cache.getBlock(singleBlocks[i].cacheKey, true, false, true));
      assertEquals(multiBlocks[i], cache.getBlock(multiBlocks[i].cacheKey, true, false, true));
      assertEquals(memoryBlocks[i], cache.getBlock(memoryBlocks[i].cacheKey, true, false, true));
    }
  }

  // test metricsPastNPeriods: CacheStats keeps a rolling window of 3 periods; each
  // rollMetricsPeriod() closes the current period and drops the oldest beyond the window.
  // The ratio comments below are (hits/requests) and (cachingHits/cachingRequests)
  // aggregated over the periods still inside the window.
  @Test
  public void testPastNPeriodsMetrics() throws Exception {
    double delta = 0.01;

    // 3 total periods
    CacheStats stats = new CacheStats("test", 3);

    // No accesses, should be 0
    stats.rollMetricsPeriod();
    assertEquals(0.0, stats.getHitRatioPastNPeriods(), delta);
    assertEquals(0.0, stats.getHitCachingRatioPastNPeriods(), delta);

    // period 1, 1 hit caching, 1 hit non-caching, 2 miss non-caching
    // should be (2/4)=0.5 and (1/1)=1
    stats.hit(false, true, BlockType.DATA);
    stats.hit(true, true, BlockType.DATA);
    stats.miss(false, false, BlockType.DATA);
    stats.miss(false, false, BlockType.DATA);
    stats.rollMetricsPeriod();
    assertEquals(0.5, stats.getHitRatioPastNPeriods(), delta);
    assertEquals(1.0, stats.getHitCachingRatioPastNPeriods(), delta);

    // period 2, 1 miss caching, 3 miss non-caching
    // should be (2/8)=0.25 and (1/2)=0.5
    stats.miss(true, false, BlockType.DATA);
    stats.miss(false, false, BlockType.DATA);
    stats.miss(false, false, BlockType.DATA);
    stats.miss(false, false, BlockType.DATA);
    stats.rollMetricsPeriod();
    assertEquals(0.25, stats.getHitRatioPastNPeriods(), delta);
    assertEquals(0.5, stats.getHitCachingRatioPastNPeriods(), delta);

    // period 3, 2 hits of each type
    // should be (6/12)=0.5 and (3/4)=0.75
    stats.hit(false, true, BlockType.DATA);
    stats.hit(true, true, BlockType.DATA);
    stats.hit(false, true, BlockType.DATA);
    stats.hit(true, true, BlockType.DATA);
    stats.rollMetricsPeriod();
    assertEquals(0.5, stats.getHitRatioPastNPeriods(), delta);
    assertEquals(0.75, stats.getHitCachingRatioPastNPeriods(), delta);

    // period 4, evict period 1, two caching misses
    // should be (4/10)=0.4 and (2/5)=0.4
    stats.miss(true, false, BlockType.DATA);
    stats.miss(true, false, BlockType.DATA);
    stats.rollMetricsPeriod();
    assertEquals(0.4, stats.getHitRatioPastNPeriods(), delta);
    assertEquals(0.4, stats.getHitCachingRatioPastNPeriods(), delta);

    // period 5, evict period 2, 2 caching misses, 2 non-caching hit
    // should be (6/10)=0.6 and (2/6)=1/3
    stats.miss(true, false, BlockType.DATA);
    stats.miss(true, false, BlockType.DATA);
    stats.hit(false, true, BlockType.DATA);
    stats.hit(false, true, BlockType.DATA);
    stats.rollMetricsPeriod();
    assertEquals(0.6, stats.getHitRatioPastNPeriods(), delta);
    assertEquals((double) 1 / 3, stats.getHitCachingRatioPastNPeriods(), delta);

    // period 6, evict period 3
    // should be (2/6)=1/3 and (0/4)=0
    stats.rollMetricsPeriod();
    assertEquals((double) 1 / 3, stats.getHitRatioPastNPeriods(), delta);
    assertEquals(0.0, stats.getHitCachingRatioPastNPeriods(), delta);

    // period 7, evict period 4
    // should be (2/4)=0.5 and (0/2)=0
    stats.rollMetricsPeriod();
    assertEquals(0.5, stats.getHitRatioPastNPeriods(), delta);
    assertEquals(0.0, stats.getHitCachingRatioPastNPeriods(), delta);

    // period 8, evict period 5
    // should be 0 and 0
    stats.rollMetricsPeriod();
    assertEquals(0.0, stats.getHitRatioPastNPeriods(), delta);
    assertEquals(0.0, stats.getHitCachingRatioPastNPeriods(), delta);

    // period 9, one of each
    // should be (2/4)=0.5 and (1/2)=0.5
    stats.miss(true, false, BlockType.DATA);
    stats.miss(false, false, BlockType.DATA);
    stats.hit(true, true, BlockType.DATA);
    stats.hit(false, true, BlockType.DATA);
    stats.rollMetricsPeriod();
    assertEquals(0.5, stats.getHitRatioPastNPeriods(), delta);
    assertEquals(0.5, stats.getHitCachingRatioPastNPeriods(), delta);
789 } 790 791 @Test 792 public void testCacheBlockNextBlockMetadataMissing() { 793 long maxSize = 100000; 794 long blockSize = calculateBlockSize(maxSize, 10); 795 int size = 100; 796 int length = HConstants.HFILEBLOCK_HEADER_SIZE + size; 797 byte[] byteArr = new byte[length]; 798 ByteBuffer buf = ByteBuffer.wrap(byteArr, 0, size); 799 HFileContext meta = new HFileContextBuilder().build(); 800 HFileBlock blockWithNextBlockMetadata = new HFileBlock(BlockType.DATA, size, size, -1, 801 ByteBuff.wrap(buf), HFileBlock.FILL_HEADER, -1, 52, -1, meta, HEAP); 802 HFileBlock blockWithoutNextBlockMetadata = new HFileBlock(BlockType.DATA, size, size, -1, 803 ByteBuff.wrap(buf), HFileBlock.FILL_HEADER, -1, -1, -1, meta, HEAP); 804 805 LruAdaptiveBlockCache cache = new LruAdaptiveBlockCache(maxSize, blockSize, false, 806 (int) Math.ceil(1.2 * maxSize / blockSize), LruAdaptiveBlockCache.DEFAULT_LOAD_FACTOR, 807 LruAdaptiveBlockCache.DEFAULT_CONCURRENCY_LEVEL, 0.66f, // min 808 0.99f, // acceptable 809 0.33f, // single 810 0.33f, // multi 811 0.34f, // memory 812 1.2f, // limit 813 false, 1024, 10, 500, 0.01f); 814 815 BlockCacheKey key = new BlockCacheKey("key1", 0); 816 ByteBuffer actualBuffer = ByteBuffer.allocate(length); 817 ByteBuffer block1Buffer = ByteBuffer.allocate(length); 818 ByteBuffer block2Buffer = ByteBuffer.allocate(length); 819 blockWithNextBlockMetadata.serialize(block1Buffer, true); 820 blockWithoutNextBlockMetadata.serialize(block2Buffer, true); 821 822 // Add blockWithNextBlockMetadata, expect blockWithNextBlockMetadata back. 823 CacheTestUtils.getBlockAndAssertEquals(cache, key, blockWithNextBlockMetadata, actualBuffer, 824 block1Buffer); 825 826 // Add blockWithoutNextBlockMetada, expect blockWithNextBlockMetadata back. 
827 CacheTestUtils.getBlockAndAssertEquals(cache, key, blockWithoutNextBlockMetadata, actualBuffer, 828 block1Buffer); 829 830 // Clear and add blockWithoutNextBlockMetadata 831 cache.clearCache(); 832 assertNull(cache.getBlock(key, false, false, false)); 833 CacheTestUtils.getBlockAndAssertEquals(cache, key, blockWithoutNextBlockMetadata, actualBuffer, 834 block2Buffer); 835 836 // Add blockWithNextBlockMetadata, expect blockWithNextBlockMetadata to replace. 837 CacheTestUtils.getBlockAndAssertEquals(cache, key, blockWithNextBlockMetadata, actualBuffer, 838 block1Buffer); 839 } 840 841 private CachedItem[] generateFixedBlocks(int numBlocks, int size, String pfx) { 842 CachedItem[] blocks = new CachedItem[numBlocks]; 843 for (int i = 0; i < numBlocks; i++) { 844 blocks[i] = new CachedItem(pfx + i, size); 845 } 846 return blocks; 847 } 848 849 private CachedItem[] generateFixedBlocks(int numBlocks, long size, String pfx) { 850 return generateFixedBlocks(numBlocks, (int) size, pfx); 851 } 852 853 private CachedItem[] generateRandomBlocks(int numBlocks, long maxSize) { 854 CachedItem[] blocks = new CachedItem[numBlocks]; 855 Random rand = ThreadLocalRandom.current(); 856 for (int i = 0; i < numBlocks; i++) { 857 blocks[i] = new CachedItem("block" + i, rand.nextInt((int) maxSize) + 1); 858 } 859 return blocks; 860 } 861 862 private long calculateBlockSize(long maxSize, int numBlocks) { 863 long roughBlockSize = maxSize / numBlocks; 864 int numEntries = (int) Math.ceil((1.2) * maxSize / roughBlockSize); 865 long totalOverhead = LruAdaptiveBlockCache.CACHE_FIXED_OVERHEAD + ClassSize.CONCURRENT_HASHMAP 866 + (numEntries * ClassSize.CONCURRENT_HASHMAP_ENTRY) 867 + (LruAdaptiveBlockCache.DEFAULT_CONCURRENCY_LEVEL * ClassSize.CONCURRENT_HASHMAP_SEGMENT); 868 long negateBlockSize = (long) (totalOverhead / numEntries); 869 negateBlockSize += LruCachedBlock.PER_BLOCK_OVERHEAD; 870 return ClassSize.align((long) Math.floor((roughBlockSize - negateBlockSize) * 0.99f)); 871 } 872 
873 private long calculateBlockSizeDefault(long maxSize, int numBlocks) { 874 long roughBlockSize = maxSize / numBlocks; 875 int numEntries = (int) Math.ceil((1.2) * maxSize / roughBlockSize); 876 long totalOverhead = LruAdaptiveBlockCache.CACHE_FIXED_OVERHEAD + ClassSize.CONCURRENT_HASHMAP 877 + (numEntries * ClassSize.CONCURRENT_HASHMAP_ENTRY) 878 + (LruAdaptiveBlockCache.DEFAULT_CONCURRENCY_LEVEL * ClassSize.CONCURRENT_HASHMAP_SEGMENT); 879 long negateBlockSize = totalOverhead / numEntries; 880 negateBlockSize += LruCachedBlock.PER_BLOCK_OVERHEAD; 881 return ClassSize.align((long) Math 882 .floor((roughBlockSize - negateBlockSize) * LruAdaptiveBlockCache.DEFAULT_ACCEPTABLE_FACTOR)); 883 } 884 885 private static class CachedItem implements Cacheable { 886 BlockCacheKey cacheKey; 887 int size; 888 889 CachedItem(String blockName, int size, int offset) { 890 this.cacheKey = new BlockCacheKey(blockName, offset); 891 this.size = size; 892 } 893 894 CachedItem(String blockName, int size) { 895 this.cacheKey = new BlockCacheKey(blockName, 0); 896 this.size = size; 897 } 898 899 /** The size of this item reported to the block cache layer */ 900 @Override 901 public long heapSize() { 902 return ClassSize.align(size); 903 } 904 905 /** Size of the cache block holding this item. Used for verification. 
*/ 906 public long cacheBlockHeapSize() { 907 return LruCachedBlock.PER_BLOCK_OVERHEAD + ClassSize.align(cacheKey.heapSize()) 908 + ClassSize.align(size); 909 } 910 911 @Override 912 public int getSerializedLength() { 913 return 0; 914 } 915 916 @Override 917 public CacheableDeserializer<Cacheable> getDeserializer() { 918 return null; 919 } 920 921 @Override 922 public void serialize(ByteBuffer destination, boolean includeNextBlockMetadata) { 923 } 924 925 @Override 926 public BlockType getBlockType() { 927 return BlockType.DATA; 928 } 929 } 930 931 static void testMultiThreadGetAndEvictBlockInternal(BlockCache cache) throws Exception { 932 int size = 100; 933 int length = HConstants.HFILEBLOCK_HEADER_SIZE + size; 934 byte[] byteArr = new byte[length]; 935 HFileContext meta = new HFileContextBuilder().build(); 936 BlockCacheKey key = new BlockCacheKey("key1", 0); 937 HFileBlock blk = new HFileBlock(BlockType.DATA, size, size, -1, 938 ByteBuff.wrap(ByteBuffer.wrap(byteArr, 0, size)), HFileBlock.FILL_HEADER, -1, 52, -1, meta, 939 HEAP); 940 AtomicBoolean err1 = new AtomicBoolean(false); 941 Thread t1 = new Thread(() -> { 942 for (int i = 0; i < 10000 && !err1.get(); i++) { 943 try { 944 cache.getBlock(key, false, false, true); 945 } catch (Exception e) { 946 err1.set(true); 947 LOG.info("Cache block or get block failure: ", e); 948 } 949 } 950 }); 951 952 AtomicBoolean err2 = new AtomicBoolean(false); 953 Thread t2 = new Thread(() -> { 954 for (int i = 0; i < 10000 && !err2.get(); i++) { 955 try { 956 cache.evictBlock(key); 957 } catch (Exception e) { 958 err2.set(true); 959 LOG.info("Evict block failure: ", e); 960 } 961 } 962 }); 963 964 AtomicBoolean err3 = new AtomicBoolean(false); 965 Thread t3 = new Thread(() -> { 966 for (int i = 0; i < 10000 && !err3.get(); i++) { 967 try { 968 cache.cacheBlock(key, blk); 969 } catch (Exception e) { 970 err3.set(true); 971 LOG.info("Cache block failure: ", e); 972 } 973 } 974 }); 975 t1.start(); 976 t2.start(); 977 
t3.start(); 978 t1.join(); 979 t2.join(); 980 t3.join(); 981 Assert.assertFalse(err1.get()); 982 Assert.assertFalse(err2.get()); 983 Assert.assertFalse(err3.get()); 984 } 985 986 @Test 987 public void testMultiThreadGetAndEvictBlock() throws Exception { 988 long maxSize = 100000; 989 long blockSize = calculateBlockSize(maxSize, 10); 990 LruAdaptiveBlockCache cache = new LruAdaptiveBlockCache(maxSize, blockSize, false, 991 (int) Math.ceil(1.2 * maxSize / blockSize), LruAdaptiveBlockCache.DEFAULT_LOAD_FACTOR, 992 LruAdaptiveBlockCache.DEFAULT_CONCURRENCY_LEVEL, 0.66f, // min 993 0.99f, // acceptable 994 0.33f, // single 995 0.33f, // multi 996 0.34f, // memory 997 1.2f, // limit 998 false, 1024, 10, 500, 0.01f); 999 testMultiThreadGetAndEvictBlockInternal(cache); 1000 } 1001 1002 public void testSkipCacheDataBlocksInteral(int heavyEvictionCountLimit) throws Exception { 1003 long maxSize = 100000000; 1004 int numBlocks = 100000; 1005 final long blockSize = calculateBlockSizeDefault(maxSize, numBlocks); 1006 assertTrue("calculateBlockSize appears broken.", blockSize * numBlocks <= maxSize); 1007 1008 final LruAdaptiveBlockCache cache = new LruAdaptiveBlockCache(maxSize, blockSize, true, 1009 (int) Math.ceil(1.2 * maxSize / blockSize), LruAdaptiveBlockCache.DEFAULT_LOAD_FACTOR, 1010 LruAdaptiveBlockCache.DEFAULT_CONCURRENCY_LEVEL, 0.5f, // min 1011 0.99f, // acceptable 1012 0.33f, // single 1013 0.33f, // multi 1014 0.34f, // memory 1015 1.2f, // limit 1016 false, maxSize, heavyEvictionCountLimit, 200, 0.01f); 1017 1018 EvictionThread evictionThread = cache.getEvictionThread(); 1019 assertNotNull(evictionThread); 1020 Waiter.waitFor(CONF, 10000, 10, () -> evictionThread.isEnteringRun()); 1021 1022 final String hfileName = "hfile"; 1023 for (int blockIndex = 0; blockIndex <= numBlocks * 3000; ++blockIndex) { 1024 CachedItem block = new CachedItem(hfileName, (int) blockSize, blockIndex); 1025 cache.cacheBlock(block.cacheKey, block, false); 1026 if 
(cache.getCacheDataBlockPercent() < 70) { 1027 // enough for test 1028 break; 1029 } 1030 } 1031 1032 evictionThread.evict(); 1033 Thread.sleep(100); 1034 1035 if (heavyEvictionCountLimit == 0) { 1036 // Check if all offset (last two digits) of cached blocks less than the percent. 1037 // It means some of blocks haven't put into BlockCache 1038 assertTrue(cache.getCacheDataBlockPercent() < 90); 1039 for (BlockCacheKey key : cache.getMapForTests().keySet()) { 1040 assertTrue(!(key.getOffset() % 100 > 90)); 1041 } 1042 } else { 1043 // Check that auto-scaling is not working (all blocks in BlockCache) 1044 assertTrue(cache.getCacheDataBlockPercent() == 100); 1045 int counter = 0; 1046 for (BlockCacheKey key : cache.getMapForTests().keySet()) { 1047 if (key.getOffset() % 100 > 90) { 1048 counter++; 1049 } 1050 } 1051 assertTrue(counter > 1000); 1052 } 1053 evictionThread.shutdown(); 1054 } 1055 1056 @Test 1057 public void testSkipCacheDataBlocks() throws Exception { 1058 // Check that auto-scaling will work right after start 1059 testSkipCacheDataBlocksInteral(0); 1060 // Check that auto-scaling will not work right after start 1061 // (have to finished before auto-scaling) 1062 testSkipCacheDataBlocksInteral(100); 1063 } 1064}