/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.backup;

import static org.junit.Assert.assertTrue;

import java.util.List;
import org.apache.hadoop.hbase.HBaseClassTestRule;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.backup.impl.BackupAdminImpl;
import org.apache.hadoop.hbase.backup.impl.BackupSystemTable;
import org.apache.hadoop.hbase.backup.impl.BulkLoad;
import org.apache.hadoop.hbase.backup.util.BackupUtils;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.testclassification.LargeTests;
import org.apache.hadoop.hbase.tool.TestBulkLoadHFiles;
import org.apache.hadoop.hbase.util.Bytes;
import org.junit.Assert;
import org.junit.ClassRule;
import org.junit.Test;
import org.junit.experimental.categories.Category;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import org.apache.hbase.thirdparty.com.google.common.collect.Lists;

/**
 * Test scenario:
 * <ol>
 * <li>Create table t1</li>
 * <li>Load data to t1</li>
 * <li>Full backup t1</li>
 * <li>Load data to t1</li>
 * <li>Bulk load into t1</li>
 * <li>Incremental backup t1</li>
 * </ol>
 */
@Category(LargeTests.class)
public class TestIncrementalBackupWithBulkLoad extends TestBackupBase {

  @ClassRule
  public static final HBaseClassTestRule CLASS_RULE =
    HBaseClassTestRule.forClass(TestIncrementalBackupWithBulkLoad.class);

  private static final Logger LOG =
    LoggerFactory.getLogger(TestIncrementalBackupWithBulkLoad.class);

  // Implement all test cases in one test since incremental backup/restore has dependencies.
  @Test
  public void testIncBackupBulkLoad() throws Exception {
    String testName = "TestIncBackupBulkLoad";
    // #1 - create full backup for all tables
    LOG.info("create full backup image for all tables");

    List<TableName> tables = Lists.newArrayList(table1);
    Connection conn = ConnectionFactory.createConnection(conf1);
    Admin admin = conn.getAdmin();
    BackupAdminImpl client = new BackupAdminImpl(conn);

    BackupRequest request = createBackupRequest(BackupType.FULL, tables, BACKUP_ROOT_DIR);
    String backupIdFull = client.backupTables(request);

    assertTrue(checkSucceeded(backupIdFull));

    // #2 - insert some data to table1
    Table t1 = conn.getTable(table1);
    Put p1;
    for (int i = 0; i < NB_ROWS_IN_BATCH; i++) {
      p1 = new Put(Bytes.toBytes("row-t1" + i));
      p1.addColumn(famName, qualName, Bytes.toBytes("val" + i));
      t1.put(p1);
    }

    Assert.assertEquals(NB_ROWS_IN_BATCH * 2, TEST_UTIL.countRows(t1));
    t1.close();
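
    // Stage and bulk load two HFiles (key ranges aaaa-cccc and ddd-ooo) into table1. The
    // row count reported by loadHFiles is carried into the expected totals below, so the
    // post-restore assertion can verify that bulk-loaded rows survive incremental backup.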
    int NB_ROWS2 = 20;
    LOG.debug("bulk loading into " + testName);
    int actual =
      TestBulkLoadHFiles.loadHFiles(testName, table1Desc, TEST_UTIL, famName, qualName, false,
        null, new byte[][][] { new byte[][] { Bytes.toBytes("aaaa"), Bytes.toBytes("cccc") },
          new byte[][] { Bytes.toBytes("ddd"), Bytes.toBytes("ooo") }, },
        true, false, true, NB_ROWS_IN_BATCH * 2, NB_ROWS2);

    // #3 - incremental backup for table1
    tables = Lists.newArrayList(table1);
    request = createBackupRequest(BackupType.INCREMENTAL, tables, BACKUP_ROOT_DIR);
    String backupIdIncMultiple = client.backupTables(request);
    assertTrue(checkSucceeded(backupIdIncMultiple));

    // #4 - bulk load again
    LOG.debug("bulk loading into " + testName);
    int actual1 =
      TestBulkLoadHFiles.loadHFiles(testName, table1Desc, TEST_UTIL, famName, qualName, false,
        null, new byte[][][] { new byte[][] { Bytes.toBytes("ppp"), Bytes.toBytes("qqq") },
          new byte[][] { Bytes.toBytes("rrr"), Bytes.toBytes("sss") }, },
        true, false, true, NB_ROWS_IN_BATCH * 2 + actual, NB_ROWS2);

    // #5 - incremental backup for table1
    tables = Lists.newArrayList(table1);
    request = createBackupRequest(BackupType.INCREMENTAL, tables, BACKUP_ROOT_DIR);
    String backupIdIncMultiple1 = client.backupTables(request);
    assertTrue(checkSucceeded(backupIdIncMultiple1));

    // Delete all data in table1
    TEST_UTIL.deleteTableData(table1);

    // #6 - restore incremental backup for table1
    TableName[] tablesRestoreIncMultiple = new TableName[] { table1 };
    client.restore(BackupUtils.createRestoreRequest(BACKUP_ROOT_DIR, backupIdIncMultiple1, false,
      tablesRestoreIncMultiple, tablesRestoreIncMultiple, true));
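
    // Verify the restore: the table should hold both batches of puts plus both bulk loads.
    // A subsequent full backup should then clear the bulk-load records tracked in the
    // backup system table, which the readBulkloadRows check below asserts.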
    Table hTable = conn.getTable(table1);
    Assert.assertEquals(NB_ROWS_IN_BATCH * 2 + actual + actual1, TEST_UTIL.countRows(hTable));

    request = createBackupRequest(BackupType.FULL, tables, BACKUP_ROOT_DIR);
    backupIdFull = client.backupTables(request);
    try (final BackupSystemTable table = new BackupSystemTable(conn)) {
      List<BulkLoad> bulkLoads = table.readBulkloadRows(tables);
      assertTrue("bulkloads still has " + bulkLoads.size() + " entries", bulkLoads.isEmpty());
    }
    assertTrue(checkSucceeded(backupIdFull));

    hTable.close();
    admin.close();
    conn.close();
  }
}