/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.replication;

import static org.junit.Assert.assertEquals;
import static org.junit.Assert.fail;

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseClassTestRule;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HBaseTestingUtil;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.mapreduce.replication.VerifyReplication;
import org.apache.hadoop.hbase.snapshot.SnapshotTestingUtils;
import org.apache.hadoop.hbase.testclassification.LargeTests;
import org.apache.hadoop.hbase.testclassification.ReplicationTests;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.CommonFSUtils;
import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
import org.apache.hadoop.hbase.zookeeper.MiniZooKeeperCluster;
import org.apache.hadoop.mapreduce.Job;
import org.junit.AfterClass;
import org.junit.Assert;
import org.junit.BeforeClass;
import org.junit.ClassRule;
import org.junit.Test;
import org.junit.experimental.categories.Category;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import org.apache.hbase.thirdparty.com.google.common.collect.ImmutableList;
import org.apache.hbase.thirdparty.com.google.common.collect.ImmutableMap;

/**
 * End-to-end test for {@link VerifyReplication} in snapshot ("cross diff over HDFS") mode: two
 * mini clusters share one mini ZooKeeper, cluster 1 replicates a table to cluster 2, and a
 * MapReduce job compares snapshots taken on both sides.
 */
@Category({ ReplicationTests.class, LargeTests.class })
public class TestVerifyReplicationCrossDiffHdfs {
  @ClassRule
  public static final HBaseClassTestRule CLASS_RULE =
    HBaseClassTestRule.forClass(TestVerifyReplicationCrossDiffHdfs.class);

  private static final Logger LOG =
    LoggerFactory.getLogger(TestVerifyReplicationCrossDiffHdfs.class);

  private static HBaseTestingUtil util1;
  private static HBaseTestingUtil util2;
  private static HBaseTestingUtil mapReduceUtil = new HBaseTestingUtil();

  private static Configuration conf1 = HBaseConfiguration.create();
  private static Configuration conf2;

  private static final byte[] FAMILY = Bytes.toBytes("f");
  private static final byte[] QUALIFIER = Bytes.toBytes("q");
  private static final String PEER_ID = "1";
  private static final TableName TABLE_NAME = TableName.valueOf("testVerifyRepCrossDiffHDFS");

  /**
   * Spins up two mini HBase clusters sharing one mini ZK (under znodes /1 and /2), creates the
   * test table on both, wires a replication peer from cluster 1 to cluster 2, loads data, and
   * starts a mini MapReduce cluster for the verification job.
   */
  @BeforeClass
  public static void setUpBeforeClass() throws Exception {
    conf1.set(HConstants.ZOOKEEPER_ZNODE_PARENT, "/1");
    util1 = new HBaseTestingUtil(conf1);
    util1.startMiniZKCluster();
    MiniZooKeeperCluster miniZK = util1.getZkCluster();
    conf1 = util1.getConfiguration();

    conf2 = HBaseConfiguration.create(conf1);
    conf2.set(HConstants.ZOOKEEPER_ZNODE_PARENT, "/2");
    util2 = new HBaseTestingUtil(conf2);
    util2.setZkCluster(miniZK);

    util1.startMiniCluster();
    util2.startMiniCluster();

    createTestingTable(util1.getAdmin());
    createTestingTable(util2.getAdmin());
    addTestingPeer();

    LOG.info("Start to load some data to source cluster.");
    loadSomeData();

    LOG.info("Start mini MapReduce cluster.");
    mapReduceUtil.setZkCluster(miniZK);
    mapReduceUtil.startMiniMapReduceCluster();
  }

  /**
   * Creates the test table with a single globally-replicated family keeping up to 100 versions.
   */
  private static void createTestingTable(Admin admin) throws IOException {
    TableDescriptor table = TableDescriptorBuilder.newBuilder(TABLE_NAME)
      .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(FAMILY).setMaxVersions(100)
        .setScope(HConstants.REPLICATION_SCOPE_GLOBAL).build())
      .build();
    admin.createTable(table);
  }

  /**
   * Adds a replication peer on cluster 1 pointing at cluster 2, replicating only TABLE_NAME.
   */
  private static void addTestingPeer() throws IOException {
    ReplicationPeerConfig rpc = ReplicationPeerConfig.newBuilder()
      .setClusterKey(util2.getClusterKey()).setReplicateAllUserTables(false)
      .setTableCFsMap(ImmutableMap.of(TABLE_NAME, ImmutableList.of())).build();
    util1.getAdmin().addReplicationPeer(PEER_ID, rpc);
  }

  /**
   * Writes 10 rows to the source cluster, then polls the peer (up to 100 scans, 100ms apart)
   * until all rows have been replicated.
   */
  private static void loadSomeData() throws IOException, InterruptedException {
    int numOfRows = 10;
    try (Table table = util1.getConnection().getTable(TABLE_NAME)) {
      for (int i = 0; i < numOfRows; i++) {
        table.put(new Put(Bytes.toBytes(i)).addColumn(FAMILY, QUALIFIER, Bytes.toBytes(i)));
      }
    }
    // Wait some time until the peer received those rows.
    Result[] results = null;
    try (Table table = util2.getConnection().getTable(TABLE_NAME)) {
      for (int i = 0; i < 100; i++) {
        try (ResultScanner rs = table.getScanner(new Scan())) {
          results = rs.next(numOfRows);
          if (results != null && results.length >= numOfRows) {
            // All rows have arrived; no need to keep re-scanning.
            break;
          }
          LOG.info("Retrying, wait until the peer received all the rows, currentRows:"
            + (results == null ? 0 : results.length));
          Thread.sleep(100);
        }
      }
    }
    Assert.assertNotNull(results);
    Assert.assertEquals(10, results.length);
  }

  @AfterClass
  public static void tearDownClass() throws Exception {
    if (mapReduceUtil != null) {
      mapReduceUtil.shutdownMiniCluster();
    }
    if (util2 != null) {
      util2.shutdownMiniCluster();
    }
    if (util1 != null) {
      util1.shutdownMiniCluster();
    }
  }

  /**
   * Snapshots the table on both clusters, then runs VerifyReplication in snapshot mode on the
   * mini MapReduce cluster and asserts 10 GOODROWS / 0 BADROWS.
   */
  @Test
  public void testVerifyRepBySnapshot() throws Exception {
    Path rootDir = CommonFSUtils.getRootDir(conf1);
    FileSystem fs = rootDir.getFileSystem(conf1);
    String sourceSnapshotName = "sourceSnapshot-" + EnvironmentEdgeManager.currentTime();
    SnapshotTestingUtils.createSnapshotAndValidate(util1.getAdmin(), TABLE_NAME,
      Bytes.toString(FAMILY), sourceSnapshotName, rootDir, fs, true);

    // Take target snapshot
    Path peerRootDir = CommonFSUtils.getRootDir(conf2);
    FileSystem peerFs = peerRootDir.getFileSystem(conf2);
    String peerSnapshotName = "peerSnapshot-" + EnvironmentEdgeManager.currentTime();
    SnapshotTestingUtils.createSnapshotAndValidate(util2.getAdmin(), TABLE_NAME,
      Bytes.toString(FAMILY), peerSnapshotName, peerRootDir, peerFs, true);

    String peerFSAddress = peerFs.getUri().toString();
    String temPath1 = new Path(fs.getUri().toString(), "/tmp1").toString();
    String temPath2 = "/tmp2";

    String[] args = new String[] { "--sourceSnapshotName=" + sourceSnapshotName,
      "--sourceSnapshotTmpDir=" + temPath1, "--peerSnapshotName=" + peerSnapshotName,
      "--peerSnapshotTmpDir=" + temPath2, "--peerFSAddress=" + peerFSAddress,
      "--peerHBaseRootAddress=" + CommonFSUtils.getRootDir(conf2), PEER_ID, TABLE_NAME.toString() };

    // Use the yarn's config override the source cluster's config.
    Configuration newConf = HBaseConfiguration.create(conf1);
    HBaseConfiguration.merge(newConf, mapReduceUtil.getConfiguration());
    newConf.set(HConstants.ZOOKEEPER_ZNODE_PARENT, "/1");
    CommonFSUtils.setRootDir(newConf, CommonFSUtils.getRootDir(conf1));
    Job job = new VerifyReplication().createSubmittableJob(newConf, args);
    if (job == null) {
      fail("Job wasn't created, see the log");
    }
    if (!job.waitForCompletion(true)) {
      fail("Job failed, see the log");
    }
    assertEquals(10,
      job.getCounters().findCounter(VerifyReplication.Verifier.Counters.GOODROWS).getValue());
    assertEquals(0,
      job.getCounters().findCounter(VerifyReplication.Verifier.Counters.BADROWS).getValue());
  }
}