/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.backup;

import static org.junit.Assert.assertTrue;

import java.io.IOException;
import java.util.concurrent.CountDownLatch;
import org.apache.hadoop.hbase.HBaseClassTestRule;
import org.apache.hadoop.hbase.HBaseTestingUtil;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.backup.util.BackupUtils;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.snapshot.MobSnapshotTestingUtils;
import org.apache.hadoop.hbase.snapshot.SnapshotTestingUtils;
import org.apache.hadoop.hbase.testclassification.LargeTests;
import org.apache.hadoop.hbase.util.Bytes;
import org.junit.Assert;
import org.junit.BeforeClass;
import org.junit.ClassRule;
import org.junit.Test;
import org.junit.experimental.categories.Category;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import org.apache.hbase.thirdparty.com.google.common.collect.Lists;

@Category(LargeTests.class)
public class TestRemoteBackup extends TestBackupBase {

  @ClassRule
  public static final HBaseClassTestRule CLASS_RULE =
    HBaseClassTestRule.forClass(TestRemoteBackup.class);

  private static final Logger LOG = LoggerFactory.getLogger(TestRemoteBackup.class);

  /**
   * Set up the cluster with the appropriate configuration before running the tests.
   * @throws Exception if starting the mini cluster or setting up the tables fails
   */
  @BeforeClass
  public static void setUp() throws Exception {
    TEST_UTIL = new HBaseTestingUtil();
    conf1 = TEST_UTIL.getConfiguration();
    conf1.setInt(HConstants.REGION_SERVER_HANDLER_COUNT, 10);
    useSecondCluster = true;
    setUpHelper();
  }

  /**
   * Verify that a remote full backup of a single table with data is created correctly.
   * The test writes rows into a third column family concurrently while the backup runs, then
   * restores the backup into a separate table and checks the restored row counts.
   * @throws Exception if an operation on the table fails
   */
  @Test
  public void testFullBackupRemote() throws Exception {
    LOG.info("test remote full backup on a single table");
    final CountDownLatch latch = new CountDownLatch(1);
    final int NB_ROWS_IN_FAM3 = 6;
    final byte[] fam3Name = Bytes.toBytes("f3");
    final byte[] fam2Name = Bytes.toBytes("f2");
    final Connection conn = ConnectionFactory.createConnection(conf1);
    // Writer thread: once the latch is released, load rows into family f3 concurrently with the
    // backup.
    Thread t = new Thread(() -> {
      try {
        latch.await();
      } catch (InterruptedException ie) {
        // ignored; proceed with the writes even if the wait is interrupted
      }
      try {
        Table t1 = conn.getTable(table1);
        Put p1;
        for (int i = 0; i < NB_ROWS_IN_FAM3; i++) {
          p1 = new Put(Bytes.toBytes("row-t1" + i));
          p1.addColumn(fam3Name, qualName, Bytes.toBytes("val" + i));
          t1.put(p1);
        }
        LOG.debug("Wrote " + NB_ROWS_IN_FAM3 + " rows into family3");
        t1.close();
      } catch (IOException ioe) {
        throw new RuntimeException(ioe);
      }
    });
    t.start();
    // family 2 is MOB enabled
    TableDescriptor newTable1Desc = TableDescriptorBuilder.newBuilder(table1Desc)
      .setColumnFamily(ColumnFamilyDescriptorBuilder.of(fam3Name))
      .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(fam2Name).setMobEnabled(true)
        .setMobThreshold(0L).build())
      .build();
    TEST_UTIL.getAdmin().modifyTable(newTable1Desc);

    SnapshotTestingUtils.loadData(TEST_UTIL, table1, 50, fam2Name);
    Table t1 = conn.getTable(table1);
    int rows0 = MobSnapshotTestingUtils.countMobRows(t1, fam2Name);

    // release the writer thread and run the remote full backup while it is still writing
    latch.countDown();
    String backupId =
      backupTables(BackupType.FULL, Lists.newArrayList(table1), BACKUP_REMOTE_ROOT_DIR);
    assertTrue(checkSucceeded(backupId));

    LOG.info("backup complete " + backupId);
    Assert.assertEquals(TEST_UTIL.countRows(t1, famName), NB_ROWS_IN_BATCH);

    t.join();
    Assert.assertEquals(TEST_UTIL.countRows(t1, fam3Name), NB_ROWS_IN_FAM3);

    TableName[] tablesRestoreFull = new TableName[] { table1 };

    TableName[] tablesMapFull = new TableName[] { table1_restore };

    BackupAdmin client = getBackupAdmin();
    client.restore(BackupUtils.createRestoreRequest(BACKUP_REMOTE_ROOT_DIR, backupId, false,
      tablesRestoreFull, tablesMapFull, false));

    // check tables for full restore
    Admin hAdmin = TEST_UTIL.getAdmin();
    assertTrue(hAdmin.tableExists(table1_restore));

    // #5.2 - checking row count of tables for full restore
    Table hTable = conn.getTable(table1_restore);
    Assert.assertEquals(TEST_UTIL.countRows(hTable, famName), NB_ROWS_IN_BATCH);
    int cnt3 = TEST_UTIL.countRows(hTable, fam3Name);
    Assert.assertTrue(cnt3 >= 0 && cnt3 <= NB_ROWS_IN_FAM3);

    int rows1 = MobSnapshotTestingUtils.countMobRows(t1, fam2Name);
    Assert.assertEquals(rows0, rows1);
    // close t1 only after the final MOB row count, so we never read from a closed table
    t1.close();
    hTable.close();

    hAdmin.close();
  }
}