/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.backup;

import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertNotEquals;
import static org.junit.Assert.assertTrue;

import java.util.ArrayList;
import java.util.Collection;
import java.util.List;
import org.apache.hadoop.hbase.HBaseClassTestRule;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.backup.BackupInfo.BackupState;
import org.apache.hadoop.hbase.backup.impl.BackupAdminImpl;
import org.apache.hadoop.hbase.backup.impl.BackupSystemTable;
import org.apache.hadoop.hbase.backup.impl.TableBackupClient;
import org.apache.hadoop.hbase.backup.impl.TableBackupClient.Stage;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.testclassification.LargeTests;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.util.ToolRunner;
import org.junit.ClassRule;
import org.junit.Test;
import org.junit.experimental.categories.Category;
import org.junit.runner.RunWith;
import org.junit.runners.Parameterized;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import org.apache.hbase.thirdparty.com.google.common.collect.Lists;

/**
 * Verifies that an incremental backup which is forced to fail at each
 * injected failure stage (see {@link Stage}) leaves the backup system table
 * consistent: exactly one new history record per attempt, incremental
 * attempts marked {@code FAILED}, and the preceding full backup still
 * {@code COMPLETE}.
 */
@Category(LargeTests.class)
@RunWith(Parameterized.class)
public class TestIncrementalBackupWithFailures extends TestBackupBase {

  @ClassRule
  public static final HBaseClassTestRule CLASS_RULE =
    HBaseClassTestRule.forClass(TestIncrementalBackupWithFailures.class);

  private static final Logger LOG =
    LoggerFactory.getLogger(TestIncrementalBackupWithFailures.class);

  /**
   * Single parameterization; its only purpose is the side effect of
   * selecting the "multiwal" WAL provider before the cluster starts.
   */
  @Parameterized.Parameters
  public static Collection<Object[]> data() {
    provider = "multiwal";
    List<Object[]> params = new ArrayList<>();
    params.add(new Object[] { Boolean.TRUE });
    return params;
  }

  /**
   * @param b unused; required by the Parameterized runner so the suite runs
   *          under the provider configured in {@link #data()}.
   */
  public TestIncrementalBackupWithFailures(Boolean b) {
  }

  // implement all test cases in 1 test since incremental backup/restore has dependencies
  @Test
  public void testIncBackupRestore() throws Exception {
    int ADD_ROWS = 99;
    // #1 - create full backup for all tables
    LOG.info("create full backup image for all tables");

    List<TableName> tables = Lists.newArrayList(table1, table2);
    final byte[] fam3Name = Bytes.toBytes("f3");
    TableDescriptor newTable1Desc = TableDescriptorBuilder.newBuilder(table1Desc)
      .setColumnFamily(ColumnFamilyDescriptorBuilder.of(fam3Name)).build();
    TEST_UTIL.getAdmin().modifyTable(newTable1Desc);

    // try-with-resources so the connection, admin and tables are released
    // even when an assertion below fails (previously they leaked on failure).
    try (Connection conn = ConnectionFactory.createConnection(conf1);
      Admin admin = conn.getAdmin()) {
      int NB_ROWS_FAM3 = 6;
      insertIntoTable(conn, table1, fam3Name, 3, NB_ROWS_FAM3).close();

      BackupAdminImpl client = new BackupAdminImpl(conn);

      BackupRequest request = createBackupRequest(BackupType.FULL, tables, BACKUP_ROOT_DIR);
      String backupIdFull = client.backupTables(request);
      assertTrue(checkSucceeded(backupIdFull));

      // #2 - insert some data to table
      try (Table t1 = insertIntoTable(conn, table1, famName, 1, ADD_ROWS)) {
        LOG.debug("writing {} rows to {}", ADD_ROWS, table1);
        // expected value first so a mismatch is reported the right way round
        assertEquals(NB_ROWS_IN_BATCH + ADD_ROWS + NB_ROWS_FAM3, TEST_UTIL.countRows(t1));
      }
      LOG.debug("written {} rows to {}", ADD_ROWS, table1);

      try (Table t2 = conn.getTable(table2)) {
        for (int i = 0; i < 5; i++) {
          Put p2 = new Put(Bytes.toBytes("row-t2" + i));
          p2.addColumn(famName, qualName, Bytes.toBytes("val" + i));
          t2.put(p2);
        }
        assertEquals(NB_ROWS_IN_BATCH + 5, TEST_UTIL.countRows(t2));
      }
      LOG.debug("written {} rows to {}", 5, table2);

      // #3 - incremental backup for multiple tables
      incrementalBackupWithFailures();
    }
  }

  /**
   * Installs the failure-injecting incremental backup client and runs one
   * backup attempt per failure stage; every attempt is expected to fail.
   */
  private void incrementalBackupWithFailures() throws Exception {
    conf1.set(TableBackupClient.BACKUP_CLIENT_IMPL_CLASS,
      IncrementalTableBackupClientForTest.class.getName());
    int maxStage = Stage.values().length - 1;
    // Fail stages between 0 and maxStage inclusive
    for (int stage = 0; stage <= maxStage; stage++) {
      LOG.info("Running stage {}", stage);
      runBackupAndFailAtStage(stage);
    }
  }

  /**
   * Runs an incremental backup configured to fail at {@code stage} and
   * verifies: the driver exits non-zero, exactly one record is appended to
   * the backup history, and in the history every incremental backup is
   * {@code FAILED} while every full backup remains {@code COMPLETE}.
   */
  private void runBackupAndFailAtStage(int stage) throws Exception {
    conf1.setInt(FullTableBackupClientForTest.BACKUP_TEST_MODE_STAGE, stage);
    try (BackupSystemTable table = new BackupSystemTable(TEST_UTIL.getConnection())) {
      int before = table.getBackupHistory().size();
      String[] args = new String[] { "create", "incremental", BACKUP_ROOT_DIR, "-t",
        table1.getNameAsString() + "," + table2.getNameAsString() };
      // Run backup; the injected failure must make the driver return non-zero
      int ret = ToolRunner.run(conf1, new BackupDriver(), args);
      assertNotEquals(0, ret);
      List<BackupInfo> backups = table.getBackupHistory();
      int after = backups.size();

      assertEquals(before + 1, after);
      for (BackupInfo data : backups) {
        if (data.getType() == BackupType.FULL) {
          assertEquals(BackupState.COMPLETE, data.getState());
        } else {
          assertEquals(BackupState.FAILED, data.getState());
        }
      }
    }
  }

}