/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.backup;

import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertTrue;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseClassTestRule;
import org.apache.hadoop.hbase.HBaseTestingUtil;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.backup.impl.BackupAdminImpl;
import org.apache.hadoop.hbase.backup.mapreduce.MapReduceHFileSplitterJob;
import org.apache.hadoop.hbase.backup.util.BackupUtils;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.testclassification.LargeTests;
import org.junit.BeforeClass;
import org.junit.ClassRule;
import org.junit.Test;
import org.junit.experimental.categories.Category;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

/**
 * Tests that a full backup written to a remote (second) cluster can be restored onto the local
 * cluster.
 */
@Category(LargeTests.class)
public class TestRemoteRestore extends TestBackupBase {

  @ClassRule
  public static final HBaseClassTestRule CLASS_RULE =
    HBaseClassTestRule.forClass(TestRemoteRestore.class);

  private static final Logger LOG = LoggerFactory.getLogger(TestRemoteRestore.class);

  /**
   * Set up the clusters with the appropriate configuration before running the tests.
   * @throws Exception if starting the mini cluster or setting up the tables fails
   */
  @BeforeClass
  public static void setUp() throws Exception {
    TEST_UTIL = new HBaseTestingUtil();
    conf1 = TEST_UTIL.getConfiguration();
    // A second cluster is required so that the backup image lives on a remote filesystem.
    useSecondCluster = true;
    setUpHelper();
  }

  /**
   * Verify that a remote restore on a single table is successful.
   * @throws Exception if doing the backup or an operation on the tables fails
   */
  @Test
  public void testFullRestoreRemote() throws Exception {
    LOG.info("test remote full backup on a single table");
    String backupId =
      backupTables(BackupType.FULL, toList(table1.getNameAsString()), BACKUP_REMOTE_ROOT_DIR);
    LOG.info("backup complete");
    TableName[] tableset = new TableName[] { table1 };
    TableName[] tablemap = new TableName[] { table1_restore };
    // Restore the remote backup image into table1_restore on the local cluster.
    getBackupAdmin().restore(BackupUtils.createRestoreRequest(BACKUP_REMOTE_ROOT_DIR, backupId,
      false, tableset, tablemap, false));
    Admin hba = TEST_UTIL.getAdmin();
    assertTrue(hba.tableExists(table1_restore));
    TEST_UTIL.deleteTable(table1_restore);
    hba.close();
  }

  /**
   * Verify that restore jobs can be run on a standalone MapReduce cluster.
   * Ensures that HFiles output via {@link MapReduceHFileSplitterJob} exist on the correct
   * filesystem.
   * @throws Exception if doing the backup or an operation on the tables fails
   */
  @Test
  public void testFullRestoreRemoteWithAlternateRestoreOutputDir() throws Exception {
    LOG.info("test remote full backup on a single table with alternate restore output dir");
    String backupId =
      backupTables(BackupType.FULL, toList(table1.getNameAsString()), BACKUP_REMOTE_ROOT_DIR);
    LOG.info("backup complete");
    TableName[] tableset = new TableName[] { table1 };
    TableName[] tablemap = new TableName[] { table1_restore };

    // Stand up a separate DFS + MapReduce cluster that shares only the ZooKeeper ensemble
    // with the HBase cluster under test.
    HBaseTestingUtil mrTestUtil = new HBaseTestingUtil();
    mrTestUtil.setZkCluster(TEST_UTIL.getZkCluster());
    mrTestUtil.startMiniDFSCluster(3);
    mrTestUtil.startMiniMapReduceCluster();

    // Point the MR-side configuration at the HBase cluster's znode parent and master
    // addresses so the restore job can locate the target cluster.
    Configuration testUtilConf = TEST_UTIL.getConnection().getConfiguration();
    Configuration conf = new Configuration(mrTestUtil.getConfiguration());
    conf.set(HConstants.ZOOKEEPER_ZNODE_PARENT,
      testUtilConf.get(HConstants.ZOOKEEPER_ZNODE_PARENT));
    conf.set(HConstants.MASTER_ADDRS_KEY, testUtilConf.get(HConstants.MASTER_ADDRS_KEY));

    // Run the restore through the standalone MR cluster, using BACKUP_ROOT_DIR as the
    // alternate restore output directory. (withOvewrite is the builder method's actual
    // spelling in the HBase API.)
    try (Connection conn = ConnectionFactory.createConnection(conf)) {
      new BackupAdminImpl(conn)
        .restore(new RestoreRequest.Builder().withBackupRootDir(BACKUP_REMOTE_ROOT_DIR)
          .withRestoreRootDir(BACKUP_ROOT_DIR).withBackupId(backupId).withCheck(false)
          .withFromTables(tableset).withToTables(tablemap).withOvewrite(false).build());
    }

    Path hfileOutputPath = new Path(
      new Path(conf.get(MapReduceHFileSplitterJob.BULK_OUTPUT_CONF_KEY)).toUri().getPath());

    // The split HFiles must exist on the HBase cluster's filesystem...
    FileSystem fileSystem = FileSystem.get(TEST_UTIL.getConfiguration());
    assertTrue(fileSystem.exists(hfileOutputPath));

    // ...and must not exist on the standalone MR cluster's filesystem.
    fileSystem = FileSystem.get(conf);
    assertFalse(fileSystem.exists(hfileOutputPath));

    Admin hba = TEST_UTIL.getAdmin();
    assertTrue(hba.tableExists(table1_restore));
    TEST_UTIL.deleteTable(table1_restore);
    hba.close();
  }
}