Package org.apache.hadoop.hbase.io.hfile
Class TestHFile
java.lang.Object
org.apache.hadoop.hbase.io.hfile.TestHFile
Tests HFile features.
-
Field Summary
Modifier and TypeFieldDescriptionprivate static org.apache.hadoop.hbase.io.hfile.CacheConfig
static final HBaseClassTestRule
private static org.apache.hadoop.conf.Configuration
private static org.apache.hadoop.fs.FileSystem
private static String
private static final org.slf4j.Logger
private final int
private static final int
private static String
private static final HBaseTestingUtil
org.junit.rules.TestName
-
Constructor Summary
-
Method Summary
Modifier and TypeMethodDescription(package private) void
basicWithSomeCodec
(String codec, boolean useTags) test none codecsprivate org.apache.hadoop.fs.FSDataOutputStream
createFSOutput
(org.apache.hadoop.fs.Path name) static org.apache.hadoop.hbase.io.hfile.HFile.Reader
createReaderFromStream
(org.apache.hadoop.hbase.io.hfile.ReaderContext context, org.apache.hadoop.hbase.io.hfile.CacheConfig cacheConf, org.apache.hadoop.conf.Configuration conf) private void
fillByteBuffAllocator
(org.apache.hadoop.hbase.io.ByteBuffAllocator alloc, int bufCount) static org.apache.hadoop.hbase.KeyValue.Type
generateKeyType
(Random rand) private org.apache.hadoop.hbase.ExtendedCell
getCell
(byte[] row, byte[] family, byte[] qualifier) private byte[]
getSomeKey
(int rowId) private org.apache.hadoop.hbase.io.ByteBuffAllocator
initAllocator
(boolean reservoirEnabled, int bufSize, int bufCount, int minAllocSize) private org.apache.hadoop.hbase.io.hfile.BlockCache
initCombinedBlockCache
(String l1CachePolicy) private void
metablocks
(String compress) private void
readAllRecords
(org.apache.hadoop.hbase.io.hfile.HFileScanner scanner) private int
readAndCheckbytes
(org.apache.hadoop.hbase.io.hfile.HFileScanner scanner, int start, int n) private long
readAtOffsetWithAllocationAsserts
(org.apache.hadoop.hbase.io.ByteBuffAllocator alloc, org.apache.hadoop.hbase.io.hfile.HFile.Reader reader, long offset, boolean cacheBlock, org.apache.hadoop.hbase.io.hfile.BlockType blockType, boolean expectSharedMem) private void
readNumMetablocks
(org.apache.hadoop.hbase.io.hfile.HFile.Reader reader, int n) private void
readStoreFile
(org.apache.hadoop.fs.Path storeFilePath, org.apache.hadoop.conf.Configuration conf, org.apache.hadoop.hbase.io.ByteBuffAllocator alloc) static void
setUp()
private void
someReadingWithMetaBlock
(org.apache.hadoop.hbase.io.hfile.HFile.Reader reader) private void
someTestingWithMetaBlock
(org.apache.hadoop.hbase.io.hfile.HFile.Writer writer) void
Make sure the ordinals for our compression algorithms do not change on us.void
Create 0-length hfile and show that it failsvoid
void
Create a truncated hfile and verify that exception thrown.void
void
Test empty HFile.void
void
protected void
testHFilefeaturesInternals
(boolean useTags) void
void
void
Tests that we properly allocate from the off-heap or on-heap when CombinedCache is configured.private void
testReaderBlockAllocationWithCombinedCache
(boolean cacheConfigCacheBlockOnRead, boolean cacheBlock, org.apache.hadoop.hbase.io.hfile.BlockType blockType, boolean expectSharedMem) void
Tests that we properly allocate from the off-heap or on-heap when LRUCache is configured.private void
testReaderBlockAllocationWithLRUCache
(boolean cacheConfigCacheBlockOnRead, boolean cacheBlock, org.apache.hadoop.hbase.io.hfile.BlockType blockType, boolean expectSharedMem) private void
testReaderCombinedCache
(String l1CachePolicy) void
Test case for CombinedBlockCache with AdaptiveLRU as L1 cachevoid
Test case for HBASE-22127 in CombinedBlockCachevoid
Test case for HBASE-22127 in LruBlockCache.void
Test case for CombinedBlockCache with LRU as L1 cachevoid
void
Test case for CombinedBlockCache with TinyLfu as L1 cachevoid
void
static void
truncateFile
(org.apache.hadoop.fs.FileSystem fs, org.apache.hadoop.fs.Path src, org.apache.hadoop.fs.Path dst) private void
writeNumMetablocks
(org.apache.hadoop.hbase.io.hfile.HFile.Writer writer, int n) private void
writeRecords
(org.apache.hadoop.hbase.io.hfile.HFile.Writer writer, boolean useTags) private int
writeSomeRecords
(org.apache.hadoop.hbase.io.hfile.HFile.Writer writer, int start, int n, boolean useTags) private org.apache.hadoop.fs.Path
-
Field Details
-
CLASS_RULE
-
testName
-
LOG
-
NUM_VALID_KEY_TYPES
-
TEST_UTIL
-
ROOT_DIR
-
minBlockSize
- See Also:
-
localFormatter
-
cacheConf
-
conf
-
fs
-
-
Constructor Details
-
TestHFile
public TestHFile()
-
-
Method Details
-
setUp
- Throws:
Exception
-
createReaderFromStream
public static org.apache.hadoop.hbase.io.hfile.HFile.Reader createReaderFromStream(org.apache.hadoop.hbase.io.hfile.ReaderContext context, org.apache.hadoop.hbase.io.hfile.CacheConfig cacheConf, org.apache.hadoop.conf.Configuration conf) throws IOException - Throws:
IOException
-
initAllocator
private org.apache.hadoop.hbase.io.ByteBuffAllocator initAllocator(boolean reservoirEnabled, int bufSize, int bufCount, int minAllocSize) -
fillByteBuffAllocator
private void fillByteBuffAllocator(org.apache.hadoop.hbase.io.ByteBuffAllocator alloc, int bufCount) -
testReaderWithoutBlockCache
- Throws:
Exception
-
testReaderWithLRUBlockCache
Test case for HBASE-22127 in LruBlockCache.- Throws:
Exception
-
initCombinedBlockCache
-
testReaderWithCombinedBlockCache
Test case for HBASE-22127 in CombinedBlockCache- Throws:
Exception
-
testReaderBlockAllocationWithLRUCache
Tests that we properly allocate from the off-heap or on-heap when LRUCache is configured. In this case, the determining factor is whether we end up caching the block or not. So the below test cases try different permutations of enabling/disabling via CacheConfig and via user request (cacheblocks), along with different expected block types.- Throws:
IOException
-
testReaderBlockAllocationWithLRUCache
private void testReaderBlockAllocationWithLRUCache(boolean cacheConfigCacheBlockOnRead, boolean cacheBlock, org.apache.hadoop.hbase.io.hfile.BlockType blockType, boolean expectSharedMem) throws IOException - Throws:
IOException
-
testReaderBlockAllocationWithCombinedCache
Tests that we properly allocate from the off-heap or on-heap when CombinedCache is configured. In this case, we should always use off-heap unless the block is an INDEX (which always goes to L1 cache which is on-heap)- Throws:
IOException
-
testReaderBlockAllocationWithCombinedCache
private void testReaderBlockAllocationWithCombinedCache(boolean cacheConfigCacheBlockOnRead, boolean cacheBlock, org.apache.hadoop.hbase.io.hfile.BlockType blockType, boolean expectSharedMem) throws IOException - Throws:
IOException
-
readAtOffsetWithAllocationAsserts
private long readAtOffsetWithAllocationAsserts(org.apache.hadoop.hbase.io.ByteBuffAllocator alloc, org.apache.hadoop.hbase.io.hfile.HFile.Reader reader, long offset, boolean cacheBlock, org.apache.hadoop.hbase.io.hfile.BlockType blockType, boolean expectSharedMem) throws IOException - Throws:
IOException
-
readStoreFile
private void readStoreFile(org.apache.hadoop.fs.Path storeFilePath, org.apache.hadoop.conf.Configuration conf, org.apache.hadoop.hbase.io.ByteBuffAllocator alloc) throws Exception - Throws:
Exception
-
writeStoreFile
- Throws:
IOException
-
generateKeyType
-
testEmptyHFile
Test empty HFile. Test all features work reasonably when hfile is empty of entries.- Throws:
IOException
-
testCorrupt0LengthHFile
Create 0-length hfile and show that it fails- Throws:
IOException
-
testCorruptOutOfOrderHFileWrite
- Throws:
IOException
-
truncateFile
public static void truncateFile(org.apache.hadoop.fs.FileSystem fs, org.apache.hadoop.fs.Path src, org.apache.hadoop.fs.Path dst) throws IOException - Throws:
IOException
-
testCorruptTruncatedHFile
Create a truncated hfile and verify that exception thrown.- Throws:
IOException
-
writeSomeRecords
private int writeSomeRecords(org.apache.hadoop.hbase.io.hfile.HFile.Writer writer, int start, int n, boolean useTags) throws IOException - Throws:
IOException
-
readAllRecords
private void readAllRecords(org.apache.hadoop.hbase.io.hfile.HFileScanner scanner) throws IOException - Throws:
IOException
-
readAndCheckbytes
private int readAndCheckbytes(org.apache.hadoop.hbase.io.hfile.HFileScanner scanner, int start, int n) throws IOException - Throws:
IOException
-
getSomeKey
-
writeRecords
private void writeRecords(org.apache.hadoop.hbase.io.hfile.HFile.Writer writer, boolean useTags) throws IOException - Throws:
IOException
-
createFSOutput
private org.apache.hadoop.fs.FSDataOutputStream createFSOutput(org.apache.hadoop.fs.Path name) throws IOException - Throws:
IOException
-
basicWithSomeCodec
Tests the "none" (no compression) codec.- Throws:
IOException
-
testTFileFeatures
- Throws:
IOException
-
testHFilefeaturesInternals
- Throws:
IOException
-
writeNumMetablocks
-
someTestingWithMetaBlock
-
readNumMetablocks
private void readNumMetablocks(org.apache.hadoop.hbase.io.hfile.HFile.Reader reader, int n) throws IOException - Throws:
IOException
-
someReadingWithMetaBlock
private void someReadingWithMetaBlock(org.apache.hadoop.hbase.io.hfile.HFile.Reader reader) throws IOException - Throws:
IOException
-
metablocks
- Throws:
Exception
-
testMetaBlocks
- Throws:
Exception
-
testNullMetaBlocks
- Throws:
Exception
-
testCompressionOrdinance
Make sure the ordinals for our compression algorithms do not change on us. -
testShortMidpointSameQual
-
getCell
-
testGetShortMidpoint
-
testDBEShipped
- Throws:
IOException
-
testReaderWithTinyLfuCombinedBlockCache
Test case for CombinedBlockCache with TinyLfu as L1 cache- Throws:
Exception
-
testReaderWithAdaptiveLruCombinedBlockCache
Test case for CombinedBlockCache with AdaptiveLRU as L1 cache- Throws:
Exception
-
testReaderWithLruCombinedBlockCache
Test case for CombinedBlockCache with LRU as L1 cache- Throws:
Exception
-
testReaderCombinedCache
- Throws:
Exception
-
testHFileContextBuilderWithIndexEncoding
- Throws:
IOException
-