Class AbstractTestWALReplay
java.lang.Object
org.apache.hadoop.hbase.regionserver.wal.AbstractTestWALReplay
- Direct Known Subclasses:
TestAsyncWALReplay, TestWALReplay
Test replay of edits out of a WAL split.
-
Nested Class Summary
Modifier and Type / Class / Description
static class
(package private) static class
(package private) static class
-
Field Summary
Modifier and Type / Field / Description
private org.apache.hadoop.conf.Configuration conf
final org.junit.rules.TestName currentTest
private final org.apache.hadoop.hbase.util.EnvironmentEdge ee
private org.apache.hadoop.fs.FileSystem fs
private org.apache.hadoop.fs.Path hbaseRootDir
private static final org.slf4j.Logger LOG
private org.apache.hadoop.fs.Path logDir
private String logName
private org.apache.hadoop.fs.Path oldLogDir
(package private) static final HBaseTestingUtility TEST_UTIL
private org.apache.hadoop.hbase.wal.WALFactory wals
-
Constructor Summary
Constructor / Description
AbstractTestWALReplay()
-
Method Summary
Modifier and Type / Method / Description
static List<org.apache.hadoop.hbase.client.Put> addRegionEdits(byte[] rowName, byte[] family, int count, org.apache.hadoop.hbase.util.EnvironmentEdge ee, org.apache.hadoop.hbase.regionserver.Region r, String qualifierPrefix)
private void addWALEdits(org.apache.hadoop.hbase.TableName tableName, org.apache.hadoop.hbase.HRegionInfo hri, byte[] rowName, byte[] family, int count, org.apache.hadoop.hbase.util.EnvironmentEdge ee, org.apache.hadoop.hbase.wal.WAL wal, org.apache.hadoop.hbase.HTableDescriptor htd, org.apache.hadoop.hbase.regionserver.MultiVersionConcurrencyControl mvcc, NavigableMap<byte[], Integer> scopes)
private org.apache.hadoop.hbase.HTableDescriptor createBasic1FamilyHTD(org.apache.hadoop.hbase.TableName tableName)
private org.apache.hadoop.hbase.HRegionInfo createBasic3FamilyHRegionInfo(org.apache.hadoop.hbase.TableName tableName)
private org.apache.hadoop.hbase.HTableDescriptor createBasic3FamilyHTD(org.apache.hadoop.hbase.TableName tableName)
private org.apache.hadoop.hbase.regionserver.wal.FSWALEntry createFSWALEntry(org.apache.hadoop.hbase.HTableDescriptor htd, org.apache.hadoop.hbase.HRegionInfo hri, long sequence, byte[] rowName, byte[] family, org.apache.hadoop.hbase.util.EnvironmentEdge ee, org.apache.hadoop.hbase.regionserver.MultiVersionConcurrencyControl mvcc, int index, NavigableMap<byte[], Integer> scopes)
private AbstractTestWALReplay.MockWAL createMockWAL()
protected abstract org.apache.hadoop.hbase.wal.WAL createWAL(org.apache.hadoop.conf.Configuration c, org.apache.hadoop.fs.Path hbaseRootDir, String logName)
private org.apache.hadoop.hbase.wal.WALEdit createWALEdit(byte[] rowName, byte[] family, org.apache.hadoop.hbase.util.EnvironmentEdge ee, int index)
private org.apache.hadoop.hbase.wal.WALKeyImpl createWALKey(org.apache.hadoop.hbase.TableName tableName, org.apache.hadoop.hbase.HRegionInfo hri, org.apache.hadoop.hbase.regionserver.MultiVersionConcurrencyControl mvcc, NavigableMap<byte[], Integer> scopes)
private void deleteDir(org.apache.hadoop.fs.Path p)
private int getScannedCount(org.apache.hadoop.hbase.regionserver.RegionScanner scanner)
private org.apache.hadoop.fs.Path runWALSplit(org.apache.hadoop.conf.Configuration c)
void setUp()
static void setUpBeforeClass()
void tearDown()
static void tearDownAfterClass()
void test2727()
    Tests for hbase-2727.
void testCompactedBulkLoadedFiles()
    HRegion test case that is made of a major compacted HFile (created with three bulk loaded files) and an edit in the memstore.
void testDatalossWhenInputError()
    Test case for https://issues.apache.org/jira/browse/HBASE-15252
private void testNameConflictWhenSplit(boolean largeFirst)
    Test case for https://issues.apache.org/jira/browse/HBASE-14949.
void testNameConflictWhenSplit0()
void testNameConflictWhenSplit1()
void testRegionMadeOfBulkLoadedFilesOnly()
    Test case of HRegion that is only made out of bulk loaded files.
void testReplayEditsAfterAbortingFlush()
    Test that we can recover the data correctly after aborting a flush.
void testReplayEditsAfterPartialFlush()
    Test that we recover correctly when there is a failure in between the flushes.
void testReplayEditsAfterRegionMovedWithMultiCF()
void testReplayEditsWrittenIntoWAL()
    Create an HRegion with the result of a WAL split and test we only see the good edits.
void testReplayEditsWrittenViaHRegion()
    Test writing edits into an HRegion, closing it, splitting logs, opening Region again.
void testSequentialEditLogSeqNum()
private void writerWALFile(org.apache.hadoop.fs.Path file, List<org.apache.hadoop.hbase.regionserver.wal.FSWALEntry> entries)
-
Field Details
-
LOG
-
TEST_UTIL
-
ee
-
hbaseRootDir
-
logName
-
oldLogDir
-
logDir
-
fs
-
conf
-
wals
-
currentTest
-
-
Constructor Details
-
AbstractTestWALReplay
public AbstractTestWALReplay()
-
-
Method Details
-
setUpBeforeClass
public static void setUpBeforeClass() throws Exception
- Throws:
Exception
-
tearDownAfterClass
public static void tearDownAfterClass() throws Exception
- Throws:
Exception
-
setUp
public void setUp() throws Exception
- Throws:
Exception
-
tearDown
public void tearDown() throws Exception
- Throws:
Exception
-
deleteDir
private void deleteDir(org.apache.hadoop.fs.Path p) throws IOException
- Throws:
IOException
-
testReplayEditsAfterRegionMovedWithMultiCF
public void testReplayEditsAfterRegionMovedWithMultiCF() throws Exception
- Throws:
Exception
-
test2727
public void test2727() throws Exception
Tests for hbase-2727.
- Throws:
Exception
- See Also:
-
testRegionMadeOfBulkLoadedFilesOnly
public void testRegionMadeOfBulkLoadedFilesOnly() throws IOException, SecurityException, IllegalArgumentException, NoSuchFieldException, IllegalAccessException, InterruptedException
Test case of HRegion that is only made out of bulk loaded files. Assert that we don't 'crash'.
-
testCompactedBulkLoadedFiles
public void testCompactedBulkLoadedFiles() throws IOException, SecurityException, IllegalArgumentException, NoSuchFieldException, IllegalAccessException, InterruptedException
HRegion test case that is made of a major compacted HFile (created with three bulk loaded files) and an edit in the memstore. This is for HBASE-10958 "[dataloss] Bulk loading with seqids can prevent some log entries from being replayed"
-
testReplayEditsWrittenViaHRegion
public void testReplayEditsWrittenViaHRegion() throws IOException, SecurityException, IllegalArgumentException, NoSuchFieldException, IllegalAccessException, InterruptedException
Test writing edits into an HRegion, closing it, splitting logs, opening Region again. Verify seqids.
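The flow of this test is condensed below as a hedged sketch (not the test's verbatim body), assuming this class's conf, fs, hbaseRootDir and logName fields plus the createWAL and runWALSplit helpers documented on this page; table, family and row names are illustrative.

// 1. Open a region on a real WAL and write an edit.
TableName tableName = TableName.valueOf("testReplayEditsWrittenViaHRegion");
HTableDescriptor htd = new HTableDescriptor(tableName);
htd.addFamily(new HColumnDescriptor(Bytes.toBytes("cf")));
HRegionInfo hri = new HRegionInfo(tableName);
WAL wal = createWAL(this.conf, hbaseRootDir, logName);
HRegion region = HRegion.createHRegion(hri, hbaseRootDir, this.conf, htd, wal);
Put put = new Put(Bytes.toBytes("row1"));
put.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q"), Bytes.toBytes("v"));
region.put(put);

// 2. Close the region without flushing (abort close) and shut the WAL down so
//    its file becomes eligible for splitting.
region.close(true);
wal.shutdown();

// 3. Split the WAL; recovered.edits files land under the region directory.
runWALSplit(this.conf);

// 4. Reopen the region on a fresh WAL; opening replays the recovered edits.
WAL wal2 = createWAL(this.conf, hbaseRootDir, logName);
HRegion region2 = HRegion.openHRegion(this.conf, this.fs, hbaseRootDir, hri, htd, wal2);

// 5. The pre-crash edit is visible again and the open sequence id has advanced.
assertFalse(region2.get(new Get(Bytes.toBytes("row1"))).isEmpty());
assertTrue(region2.getOpenSeqNum() > 0);
-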
testReplayEditsAfterPartialFlush
public void testReplayEditsAfterPartialFlush() throws IOException, SecurityException, IllegalArgumentException, NoSuchFieldException, IllegalAccessException, InterruptedException
Test that we recover correctly when there is a failure in between the flushes, i.e. some stores got flushed but others did not. Unfortunately, there is no easy hook to flush at a store level. The way we get around this is by flushing at the region level, and then deleting the recently flushed store file for one of the stores. This puts us back in the situation where all but that store got flushed and the region died. We restart the region and verify that the edits were replayed.
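A minimal sketch of the store-file-deletion trick described above, assuming this class's fs field, an already-populated HRegion named region with column families "a" and "b", and the standard <regionDir>/<family> store layout; the names are illustrative, not the test's own.

// 1. Flush the whole region; every store writes its memstore snapshot to an HFile.
region.flush(true);

// 2. Delete the freshly flushed files of one store (family "a"), leaving the region
//    looking as if that store had never flushed before the simulated crash.
Path regionDir = region.getRegionFileSystem().getRegionDir();
Path storeDir = new Path(regionDir, "a");
for (FileStatus storeFile : this.fs.listStatus(storeDir)) {
  this.fs.delete(storeFile.getPath(), false);
}

// 3. Close the region, split the WAL and reopen: replay must restore family "a"
//    from the WAL while family "b" is still served from its flushed HFile.
-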
testReplayEditsAfterAbortingFlush
public void testReplayEditsAfterAbortingFlush() throws IOException
Test that we can recover the data correctly after aborting a flush. In the test, we first abort a flush after writing some data, then write more data and flush again, and finally verify the data.
- Throws:
IOException
-
getScannedCount
private int getScannedCount(org.apache.hadoop.hbase.regionserver.RegionScanner scanner) throws IOException
- Throws:
IOException
-
testReplayEditsWrittenIntoWAL
public void testReplayEditsWrittenIntoWAL() throws Exception
Create an HRegion with the result of a WAL split and test we only see the good edits.
- Throws:
Exception
-
testSequentialEditLogSeqNum
public void testSequentialEditLogSeqNum() throws IOException
- Throws:
IOException
-
testDatalossWhenInputError
public void testDatalossWhenInputError() throws Exception
Test case for https://issues.apache.org/jira/browse/HBASE-15252.
- Throws:
Exception
-
testNameConflictWhenSplit
private void testNameConflictWhenSplit(boolean largeFirst) throws IOException, org.apache.hadoop.hbase.util.CommonFSUtils.StreamLacksCapabilityException
Test case for https://issues.apache.org/jira/browse/HBASE-14949.
- Throws:
IOException
org.apache.hadoop.hbase.util.CommonFSUtils.StreamLacksCapabilityException
-
testNameConflictWhenSplit0
public void testNameConflictWhenSplit0() throws IOException, org.apache.hadoop.hbase.util.CommonFSUtils.StreamLacksCapabilityException
- Throws:
IOException
org.apache.hadoop.hbase.util.CommonFSUtils.StreamLacksCapabilityException
-
testNameConflictWhenSplit1
public void testNameConflictWhenSplit1() throws IOException, org.apache.hadoop.hbase.util.CommonFSUtils.StreamLacksCapabilityException
- Throws:
IOException
org.apache.hadoop.hbase.util.CommonFSUtils.StreamLacksCapabilityException
-
createBasic1FamilyHTD
private org.apache.hadoop.hbase.HTableDescriptor createBasic1FamilyHTD(org.apache.hadoop.hbase.TableName tableName)
-
createMockWAL
private AbstractTestWALReplay.MockWAL createMockWAL() throws IOException
- Throws:
IOException
-
createWALEdit
private org.apache.hadoop.hbase.wal.WALEdit createWALEdit(byte[] rowName, byte[] family, org.apache.hadoop.hbase.util.EnvironmentEdge ee, int index)
-
addRegionEdits
public static List<org.apache.hadoop.hbase.client.Put> addRegionEdits(byte[] rowName, byte[] family, int count, org.apache.hadoop.hbase.util.EnvironmentEdge ee, org.apache.hadoop.hbase.regionserver.Region r, String qualifierPrefix) throws IOException
- Throws:
IOException
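An illustrative call (the row, family, count and prefix values are made up, and the qualifier naming is our reading of the qualifierPrefix parameter rather than documented behaviour):

// Write ten puts to region r, stamped via the supplied EnvironmentEdge, and keep
// the applied Puts for later verification against the replayed region.
List<Put> applied = AbstractTestWALReplay.addRegionEdits(
    Bytes.toBytes("row"), Bytes.toBytes("cf"), 10,
    EnvironmentEdgeManager.getDelegate(), region, "q");
assertEquals(10, applied.size());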
-
createBasic3FamilyHRegionInfo
private org.apache.hadoop.hbase.HRegionInfo createBasic3FamilyHRegionInfo(org.apache.hadoop.hbase.TableName tableName)
-
runWALSplit
private org.apache.hadoop.fs.Path runWALSplit(org.apache.hadoop.conf.Configuration c) throws IOException
- Throws:
IOException
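One plausible shape for this helper, sketched rather than quoted, assuming the static WALSplitter.split(rootDir, logDir, oldLogDir, fs, conf, walFactory) entry point and this class's hbaseRootDir, logDir, oldLogDir, fs and wals fields:

private Path runWALSplit(Configuration c) throws IOException {
  // Split every WAL under logDir into per-region recovered.edits files.
  List<Path> splits = WALSplitter.split(this.hbaseRootDir, this.logDir,
      this.oldLogDir, FileSystem.get(c), c, this.wals);
  // The tests on this page work against a single region, so exactly one
  // recovered-edits path is expected back; hand it to the caller.
  assertEquals(1, splits.size());
  assertTrue(this.fs.exists(splits.get(0)));
  return splits.get(0);
}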
-
createBasic3FamilyHTD
private org.apache.hadoop.hbase.HTableDescriptor createBasic3FamilyHTD(org.apache.hadoop.hbase.TableName tableName)
-
writerWALFile
private void writerWALFile(org.apache.hadoop.fs.Path file, List<org.apache.hadoop.hbase.regionserver.wal.FSWALEntry> entries) throws IOException, org.apache.hadoop.hbase.util.CommonFSUtils.StreamLacksCapabilityException
- Throws:
IOException
org.apache.hadoop.hbase.util.CommonFSUtils.StreamLacksCapabilityException
-
createWAL
protected abstract org.apache.hadoop.hbase.wal.WAL createWAL(org.apache.hadoop.conf.Configuration c, org.apache.hadoop.fs.Path hbaseRootDir, String logName) throws IOException
- Throws:
IOException
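Concrete subclasses (TestWALReplay, TestAsyncWALReplay) supply the WAL implementation under test. One plausible override, sketched under the assumption that a plain WALFactory-backed WAL is enough; the shipped subclasses wire up their FSHLog/AsyncFSWAL specifics instead:

@Override
protected WAL createWAL(Configuration c, Path hbaseRootDir, String logName) throws IOException {
  // Build a factory scoped to this test's log name and hand back the WAL it would
  // use for a region of an arbitrary (illustrative) table; callers in this class
  // pass the WAL straight into HRegion.createHRegion/openHRegion.
  WALFactory factory = new WALFactory(c, logName);
  return factory.getWAL(RegionInfoBuilder.newBuilder(TableName.valueOf("replay_test")).build());
}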
-