/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.backup;

import static org.junit.Assert.assertTrue;

import java.util.List;
import org.apache.hadoop.hbase.HBaseClassTestRule;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.backup.impl.BackupAdminImpl;
import org.apache.hadoop.hbase.backup.util.BackupUtils;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.testclassification.LargeTests;
import org.junit.Assert;
import org.junit.ClassRule;
import org.junit.Test;
import org.junit.experimental.categories.Category;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import org.apache.hbase.thirdparty.com.google.common.collect.Lists;

/**
 * Verifies that two incremental backup images can be merged and that restoring from the merged
 * image recovers the rows written before the full backup plus both rounds of incremental writes.
 */
@Category(LargeTests.class)
public class TestBackupMerge extends TestBackupBase {

  @ClassRule
  public static final HBaseClassTestRule CLASS_RULE =
    HBaseClassTestRule.forClass(TestBackupMerge.class);

  private static final Logger LOG = LoggerFactory.getLogger(TestBackupMerge.class);

  @Test
  public void TestIncBackupMergeRestore() throws Exception {
    int ADD_ROWS = 99;
    // #1 - create full backup for all tables
    LOG.info("create full backup image for all tables");

    List<TableName> tables = Lists.newArrayList(table1, table2);

    Connection conn = ConnectionFactory.createConnection(conf1);

    Admin admin = conn.getAdmin();
    BackupAdminImpl client = new BackupAdminImpl(conn);

    BackupRequest request = createBackupRequest(BackupType.FULL, tables, BACKUP_ROOT_DIR);
    String backupIdFull = client.backupTables(request);

    assertTrue(checkSucceeded(backupIdFull));

    // #2 - insert some data into table1 and table2
    LOG.debug("writing " + ADD_ROWS + " rows to " + table1);
    Table t1 = insertIntoTable(conn, table1, famName, 1, ADD_ROWS);
    Assert.assertEquals(NB_ROWS_IN_BATCH + ADD_ROWS, TEST_UTIL.countRows(t1));
    t1.close();
    LOG.debug("written " + ADD_ROWS + " rows to " + table1);

    Table t2 = insertIntoTable(conn, table2, famName, 1, ADD_ROWS);
    Assert.assertEquals(NB_ROWS_IN_BATCH + ADD_ROWS, TEST_UTIL.countRows(t2));
    t2.close();
    LOG.debug("written " + ADD_ROWS + " rows to " + table2);

    // #3 - first incremental backup for multiple tables
    tables = Lists.newArrayList(table1, table2);
    request = createBackupRequest(BackupType.INCREMENTAL, tables, BACKUP_ROOT_DIR);
    String backupIdIncMultiple = client.backupTables(request);

    assertTrue(checkSucceeded(backupIdIncMultiple));

    // #4 - insert another batch of rows into both tables
    t1 = insertIntoTable(conn, table1, famName, 2, ADD_ROWS);
    t1.close();

    t2 = insertIntoTable(conn, table2, famName, 2, ADD_ROWS);
    t2.close();

    // #5 - second incremental backup for multiple tables
    request = createBackupRequest(BackupType.INCREMENTAL, tables, BACKUP_ROOT_DIR);
    String backupIdIncMultiple2 = client.backupTables(request);
    assertTrue(checkSucceeded(backupIdIncMultiple2));

    // #6 - merge the two incremental backup images
    try (BackupAdmin bAdmin = new BackupAdminImpl(conn)) {
      String[] backups = new String[] { backupIdIncMultiple, backupIdIncMultiple2 };
      bAdmin.mergeBackups(backups);
    }

    // #7 - restore incremental backup for multiple tables, with overwrite
    TableName[] tablesRestoreIncMultiple = new TableName[] { table1, table2 };
    TableName[] tablesMapIncMultiple = new TableName[] { table1_restore, table2_restore };
    client.restore(BackupUtils.createRestoreRequest(BACKUP_ROOT_DIR, backupIdIncMultiple2, false,
      tablesRestoreIncMultiple, tablesMapIncMultiple, true));

    Table hTable = conn.getTable(table1_restore);
    LOG.debug("After incremental restore: " + hTable.getDescriptor());
    int countRows = TEST_UTIL.countRows(hTable, famName);
    LOG.debug("f1 has " + countRows + " rows");
    Assert.assertEquals(NB_ROWS_IN_BATCH + 2 * ADD_ROWS, countRows);

    hTable.close();

    hTable = conn.getTable(table2_restore);
    Assert.assertEquals(NB_ROWS_IN_BATCH + 2 * ADD_ROWS, TEST_UTIL.countRows(hTable));
    hTable.close();

    admin.close();
    conn.close();
  }
}