/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.io.hfile.bucket;

import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertTrue;

import java.io.File;
import java.io.IOException;
import java.util.Iterator;
import java.util.Map;
import java.util.Random;
import java.util.concurrent.ThreadLocalRandom;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseClassTestRule;
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.fs.HFileSystem;
import org.apache.hadoop.hbase.io.hfile.BlockCacheKey;
import org.apache.hadoop.hbase.io.hfile.BlockType;
import org.apache.hadoop.hbase.io.hfile.CacheConfig;
import org.apache.hadoop.hbase.io.hfile.HFile;
import org.apache.hadoop.hbase.io.hfile.HFileBlock;
import org.apache.hadoop.hbase.io.hfile.HFileContext;
import org.apache.hadoop.hbase.io.hfile.HFileContextBuilder;
import org.apache.hadoop.hbase.io.hfile.PrefetchExecutor;
import org.apache.hadoop.hbase.io.hfile.RandomKeyValueUtil;
import org.apache.hadoop.hbase.regionserver.StoreFileWriter;
import org.apache.hadoop.hbase.testclassification.IOTests;
import org.apache.hadoop.hbase.testclassification.MediumTests;
import org.junit.ClassRule;
import org.junit.Test;
import org.junit.experimental.categories.Category;
import org.junit.rules.TestName;

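/**
 * Tests persistence behaviour of the file-backed {@link BucketCache} around block prefetch:
 * that cache metadata is periodically written to {@code bucket.persistence} on the configured
 * interval, and that the {@code fullyCachedFiles} bookkeeping is cleared when blocks of a
 * prefetched file are evicted, whether the prefetch has finished or is still running.
 */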
@Category({ IOTests.class, MediumTests.class })
public class TestBucketCachePersister {

  @ClassRule
  public static final HBaseClassTestRule CLASS_RULE =
    HBaseClassTestRule.forClass(TestBucketCachePersister.class);

  public TestName name = new TestName();

  public int constructedBlockSize = 16 * 1024;

  public int[] constructedBlockSizes =
    new int[] { 2 * 1024 + 1024, 4 * 1024 + 1024, 8 * 1024 + 1024, 16 * 1024 + 1024,
      28 * 1024 + 1024, 32 * 1024 + 1024, 64 * 1024 + 1024, 96 * 1024 + 1024, 128 * 1024 + 1024 };

  private static final HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();

  private static final int NUM_VALID_KEY_TYPES = KeyValue.Type.values().length - 2;
  private static final int DATA_BLOCK_SIZE = 2048;
  private static final int NUM_KV = 1000;

  final long capacitySize = 32 * 1024 * 1024;
  final int writeThreads = BucketCache.DEFAULT_WRITER_THREADS;
  final int writerQLen = BucketCache.DEFAULT_WRITER_QUEUE_ITEMS;
  Path testDir;

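  /**
   * Builds a configuration with prefetch-on-open enabled and the bucket cache persistence
   * interval set to the given value (treated as milliseconds by these tests); also creates the
   * shared test directory.
   */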
  public Configuration setupBucketCacheConfig(long bucketCachePersistInterval) throws IOException {
    Configuration conf = TEST_UTIL.getConfiguration();
    conf.setBoolean(CacheConfig.PREFETCH_BLOCKS_ON_OPEN_KEY, true);
    testDir = TEST_UTIL.getDataTestDir();
    TEST_UTIL.getTestFileSystem().mkdirs(testDir);
    conf.setLong(CacheConfig.BUCKETCACHE_PERSIST_INTERVAL_KEY, bucketCachePersistInterval);
    return conf;
  }

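  /**
   * Creates a file-backed bucket cache under {@code testDir} that persists its metadata to
   * {@code testDir/bucket.persistence}.
   */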
  public BucketCache setupBucketCache(Configuration conf) throws IOException {
    BucketCache bucketCache = new BucketCache("file:" + testDir + "/bucket.cache", capacitySize,
      constructedBlockSize, constructedBlockSizes, writeThreads, writerQLen,
      testDir + "/bucket.persistence", 60 * 1000, conf);
    return bucketCache;
  }

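  /**
   * Shuts the cache down and deletes the test directory, asserting that the directory is gone.
   */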
  public void cleanupBucketCache(BucketCache bucketCache) throws IOException {
    bucketCache.shutdown();
    TEST_UTIL.cleanupDataTestDirOnTestFS(String.valueOf(testDir));
    assertFalse(TEST_UTIL.getTestFileSystem().exists(testDir));
  }

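  /**
   * With a short persistence interval, the persistence file should appear shortly after blocks
   * from the prefetched store files land in the cache.
   */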
  @Test
  public void testPrefetchPersistenceCrash() throws Exception {
    long bucketCachePersistInterval = 3000;
    Configuration conf = setupBucketCacheConfig(bucketCachePersistInterval);
    BucketCache bucketCache = setupBucketCache(conf);
    CacheConfig cacheConf = new CacheConfig(conf, bucketCache);
    FileSystem fs = HFileSystem.get(conf);
    // Load Cache
    Path storeFile = writeStoreFile("TestPrefetch0", conf, cacheConf, fs);
    Path storeFile2 = writeStoreFile("TestPrefetch1", conf, cacheConf, fs);
    readStoreFile(storeFile, 0, fs, cacheConf, conf, bucketCache);
    readStoreFile(storeFile2, 0, fs, cacheConf, conf, bucketCache);
    Thread.sleep(bucketCachePersistInterval);
    assertTrue(new File(testDir + "/bucket.persistence").exists());
    assertTrue(new File(testDir + "/bucket.persistence").delete());
    cleanupBucketCache(bucketCache);
  }

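  /**
   * With an effectively infinite persistence interval, no persistence file should be written.
   */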
  @Test
  public void testPrefetchPersistenceCrashNegative() throws Exception {
    long bucketCachePersistInterval = Long.MAX_VALUE;
    Configuration conf = setupBucketCacheConfig(bucketCachePersistInterval);
    BucketCache bucketCache = setupBucketCache(conf);
    CacheConfig cacheConf = new CacheConfig(conf, bucketCache);
    FileSystem fs = HFileSystem.get(conf);
    // Load Cache
    Path storeFile = writeStoreFile("TestPrefetch2", conf, cacheConf, fs);
    readStoreFile(storeFile, 0, fs, cacheConf, conf, bucketCache);
    assertFalse(new File(testDir + "/bucket.persistence").exists());
    cleanupBucketCache(bucketCache);
  }

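  /**
   * Once prefetch completes and a block of the file is evicted, the file should no longer be
   * listed in {@code fullyCachedFiles}.
   */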
  @Test
  public void testPrefetchListUponBlockEviction() throws Exception {
    Configuration conf = setupBucketCacheConfig(200);
    BucketCache bucketCache = setupBucketCache(conf);
    CacheConfig cacheConf = new CacheConfig(conf, bucketCache);
    FileSystem fs = HFileSystem.get(conf);
    // Load Blocks in cache
    Path storeFile = writeStoreFile("TestPrefetch3", conf, cacheConf, fs);
    readStoreFile(storeFile, 0, fs, cacheConf, conf, bucketCache);
    // Wait for the file to be marked as fully cached before evicting
    int retries = 0;
    while (!bucketCache.fullyCachedFiles.containsKey(storeFile.getName()) && retries < 5) {
      Thread.sleep(500);
      retries++;
    }
    assertTrue(bucketCache.fullyCachedFiles.containsKey(storeFile.getName()));
    BlockCacheKey bucketCacheKey = bucketCache.backingMap.entrySet().iterator().next().getKey();
    // Evict Blocks from cache
    bucketCache.evictBlock(bucketCacheKey);
    assertFalse(bucketCache.fullyCachedFiles.containsKey(storeFile.getName()));
    cleanupBucketCache(bucketCache);
  }

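  /**
   * Evicting a data block while the prefetch for the file is still running should prevent the
   * file from ever being listed in {@code fullyCachedFiles}.
   */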
  @Test
  public void testPrefetchBlockEvictionWhilePrefetchRunning() throws Exception {
    Configuration conf = setupBucketCacheConfig(200);
    BucketCache bucketCache = setupBucketCache(conf);
    CacheConfig cacheConf = new CacheConfig(conf, bucketCache);
    FileSystem fs = HFileSystem.get(conf);
    // Load Blocks in cache
    Path storeFile = writeStoreFile("TestPrefetch3", conf, cacheConf, fs);
    HFile.createReader(fs, storeFile, cacheConf, true, conf);
    boolean evicted = false;
    while (!PrefetchExecutor.isCompleted(storeFile)) {
      if (bucketCache.backingMap.size() > 0 && !evicted) {
        Iterator<Map.Entry<BlockCacheKey, BucketEntry>> it =
          bucketCache.backingMap.entrySet().iterator();
        // Evict the first data block found in the cache
        while (it.hasNext() && !evicted) {
          Map.Entry<BlockCacheKey, BucketEntry> entry = it.next();
          if (entry.getKey().getBlockType().equals(BlockType.DATA)) {
            evicted = bucketCache.evictBlock(entry.getKey());
          }
        }
      }
      Thread.sleep(10);
    }
    assertFalse(bucketCache.fullyCachedFiles.containsKey(storeFile.getName()));
    cleanupBucketCache(bucketCache);
  }

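  /**
   * Opens the store file (triggering prefetch-on-open), waits for prefetch to complete, then
   * asserts that the block at the given offset ended up in the bucket cache for data and index
   * block types.
   */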
  public void readStoreFile(Path storeFilePath, long offset, FileSystem fs, CacheConfig cacheConf,
    Configuration conf, BucketCache bucketCache) throws Exception {
    // Open the file
    HFile.Reader reader = HFile.createReader(fs, storeFilePath, cacheConf, true, conf);

    while (!reader.prefetchComplete()) {
      // Sleep for a bit
      Thread.sleep(1000);
    }
    HFileBlock block = reader.readBlock(offset, -1, false, true, false, true, null, null);
    BlockCacheKey blockCacheKey = new BlockCacheKey(reader.getName(), offset);
    BucketEntry be = bucketCache.backingMap.get(blockCacheKey);
    boolean isCached = bucketCache.getBlock(blockCacheKey, true, false, true) != null;

    if (
      block.getBlockType() == BlockType.DATA || block.getBlockType() == BlockType.ROOT_INDEX
        || block.getBlockType() == BlockType.INTERMEDIATE_INDEX
    ) {
      assertTrue(isCached);
    }
  }

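  /**
   * Writes a store file containing {@code NUM_KV} random KeyValues into a subdirectory of the
   * data test dir named after {@code fname} and returns the file's path.
   */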
  public Path writeStoreFile(String fname, Configuration conf, CacheConfig cacheConf, FileSystem fs)
    throws IOException {
    Path storeFileParentDir = new Path(TEST_UTIL.getDataTestDir(), fname);
    HFileContext meta = new HFileContextBuilder().withBlockSize(DATA_BLOCK_SIZE).build();
    StoreFileWriter sfw = new StoreFileWriter.Builder(conf, cacheConf, fs)
      .withOutputDir(storeFileParentDir).withFileContext(meta).build();
    Random rand = ThreadLocalRandom.current();
    final int rowLen = 32;
    for (int i = 0; i < NUM_KV; ++i) {
      byte[] k = RandomKeyValueUtil.randomOrderedKey(rand, i);
      byte[] v = RandomKeyValueUtil.randomValue(rand);
      int cfLen = rand.nextInt(k.length - rowLen + 1);
      KeyValue kv = new KeyValue(k, 0, rowLen, k, rowLen, cfLen, k, rowLen + cfLen,
        k.length - rowLen - cfLen, rand.nextLong(), generateKeyType(rand), v, 0, v.length);
      sfw.append(kv);
    }

    sfw.close();
    return sfw.getPath();
  }

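  /**
   * Returns a random {@link KeyValue.Type} for generated cells: half are Puts, the rest are
   * drawn from the remaining valid types, never Minimum or Maximum.
   */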
  public static KeyValue.Type generateKeyType(Random rand) {
    if (rand.nextBoolean()) {
      // Let's make half of KVs puts.
      return KeyValue.Type.Put;
    } else {
      KeyValue.Type keyType = KeyValue.Type.values()[1 + rand.nextInt(NUM_VALID_KEY_TYPES)];
      if (keyType == KeyValue.Type.Minimum || keyType == KeyValue.Type.Maximum) {
        throw new RuntimeException("Generated an invalid key type: " + keyType + ". "
          + "Probably the layout of KeyValue.Type has changed.");
      }
      return keyType;
    }
  }

}