/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.replication;

import static org.apache.hadoop.hbase.HConstants.REPLICATION_SCOPE_GLOBAL;
import static org.apache.hadoop.hbase.HConstants.ZOOKEEPER_ZNODE_PARENT;
import static org.apache.hadoop.hbase.replication.regionserver.HBaseInterClusterReplicationEndpoint.REPLICATION_DROP_ON_DELETED_COLUMN_FAMILY_KEY;
import static org.junit.Assert.fail;

import java.io.IOException;
import java.util.Arrays;
import java.util.stream.Collectors;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseClassTestRule;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.Waiter.Predicate;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.testclassification.LargeTests;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.JVMClusterUtil;
import org.apache.hadoop.hbase.zookeeper.MiniZooKeeperCluster;
import org.junit.After;
import org.junit.AfterClass;
import org.junit.Before;
import org.junit.BeforeClass;
import org.junit.ClassRule;
import org.junit.Test;
import org.junit.experimental.categories.Category;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

/**
 * Test that replication edits targeting a deleted column family are dropped when
 * REPLICATION_DROP_ON_DELETED_COLUMN_FAMILY_KEY is enabled on the source cluster, and that
 * replication stalls while the column family still exists on the source table.
 */
@Category({ LargeTests.class })
public class TestReplicationEditsDroppedWithDeletedTableCFs {

  @ClassRule
  public static final HBaseClassTestRule CLASS_RULE =
    HBaseClassTestRule.forClass(TestReplicationEditsDroppedWithDeletedTableCFs.class);

  private static final Logger LOG =
    LoggerFactory.getLogger(TestReplicationEditsDroppedWithDeletedTableCFs.class);

  private static Configuration conf1 = HBaseConfiguration.create();
  private static Configuration conf2 = HBaseConfiguration.create();

  protected static HBaseTestingUtility utility1;
  protected static HBaseTestingUtility utility2;

  private static Admin admin1;
  private static Admin admin2;

  private static final TableName TABLE = TableName.valueOf("table");
  private static final byte[] NORMAL_CF = Bytes.toBytes("normal_cf");
  private static final byte[] DROPPED_CF = Bytes.toBytes("dropped_cf");

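  // Row key, qualifier and value shared by all test puts and gets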
  private static final byte[] ROW = Bytes.toBytes("row");
  private static final byte[] QUALIFIER = Bytes.toBytes("q");
  private static final byte[] VALUE = Bytes.toBytes("value");

  private static final String PEER_ID = "1";
  private static final long SLEEP_TIME = 1000;
  private static final int NB_RETRIES = 10;

  @BeforeClass
  public static void setUpBeforeClass() throws Exception {
    // Set true to drop replication edits for a deleted column family
    conf1.setBoolean(REPLICATION_DROP_ON_DELETED_COLUMN_FAMILY_KEY, true);
    conf1.set(ZOOKEEPER_ZNODE_PARENT, "/1");
    conf1.setInt("replication.source.nb.capacity", 1);
    utility1 = new HBaseTestingUtility(conf1);
    utility1.startMiniZKCluster();
    MiniZooKeeperCluster miniZK = utility1.getZkCluster();
    conf1 = utility1.getConfiguration();

    conf2 = HBaseConfiguration.create(conf1);
    conf2.set(ZOOKEEPER_ZNODE_PARENT, "/2");
    utility2 = new HBaseTestingUtility(conf2);
    utility2.setZkCluster(miniZK);

    utility1.startMiniCluster(1);
    utility2.startMiniCluster(1);

    admin1 = utility1.getAdmin();
    admin2 = utility2.getAdmin();
  }

  @AfterClass
  public static void tearDownAfterClass() throws Exception {
    utility2.shutdownMiniCluster();
    utility1.shutdownMiniCluster();
  }

  @Before
  public void setup() throws Exception {
    // Roll log
    for (JVMClusterUtil.RegionServerThread r : utility1.getHBaseCluster()
      .getRegionServerThreads()) {
      utility1.getAdmin().rollWALWriter(r.getRegionServer().getServerName());
    }
    // add peer
    ReplicationPeerConfig rpc = ReplicationPeerConfig.newBuilder()
      .setClusterKey(utility2.getClusterKey()).setReplicateAllUserTables(true).build();
    admin1.addReplicationPeer(PEER_ID, rpc);
    // create table
    createTable();
  }

  @After
  public void tearDown() throws Exception {
    // Remove peer
    admin1.removeReplicationPeer(PEER_ID);
    // Drop table
    admin1.disableTable(TABLE);
    admin1.deleteTable(TABLE);
    admin2.disableTable(TABLE);
    admin2.deleteTable(TABLE);
  }

  private void createTable() throws Exception {
    TableDescriptor desc = createTableDescriptor(NORMAL_CF, DROPPED_CF);
    admin1.createTable(desc);
    admin2.createTable(desc);
    utility1.waitUntilAllRegionsAssigned(desc.getTableName());
    utility2.waitUntilAllRegionsAssigned(desc.getTableName());
  }

  @Test
  public void testEditsDroppedWithDeleteCF() throws Exception {
    admin1.disableReplicationPeer(PEER_ID);

    try (Table table = utility1.getConnection().getTable(TABLE)) {
      Put put = new Put(ROW);
      put.addColumn(DROPPED_CF, QUALIFIER, VALUE);
      table.put(put);
    }

    deleteCf(admin1);
    deleteCf(admin2);

    admin1.enableReplicationPeer(PEER_ID);

    verifyReplicationProceeded();
  }

  @Test
  public void testEditsBehindDeleteCFTiming() throws Exception {
    admin1.disableReplicationPeer(PEER_ID);

    try (Table table = utility1.getConnection().getTable(TABLE)) {
      Put put = new Put(ROW);
      put.addColumn(DROPPED_CF, QUALIFIER, VALUE);
      table.put(put);
    }

    // Only delete cf from peer cluster
    deleteCf(admin2);

    admin1.enableReplicationPeer(PEER_ID);

    // The source table's cf still exists, so replication should be stalled
    verifyReplicationStuck();
    deleteCf(admin1);
    // Now the source table's cf is gone as well, so replication should proceed and the
    // offending edits should be dropped
    verifyReplicationProceeded();
  }

  private void verifyReplicationProceeded() throws Exception {
    // Write to the surviving cf on the source and wait for the edit to appear on the peer
    try (Table table = utility1.getConnection().getTable(TABLE)) {
      Put put = new Put(ROW);
      put.addColumn(NORMAL_CF, QUALIFIER, VALUE);
      table.put(put);
    }
    utility2.waitFor(NB_RETRIES * SLEEP_TIME, (Predicate<Exception>) () -> {
      try (Table peerTable = utility2.getConnection().getTable(TABLE)) {
        Result result = peerTable.get(new Get(ROW).addColumn(NORMAL_CF, QUALIFIER));
        return result != null && !result.isEmpty()
          && Bytes.equals(VALUE, result.getValue(NORMAL_CF, QUALIFIER));
      }
    });
  }

  private void verifyReplicationStuck() throws Exception {
    // Write to the surviving cf on the source and verify the edit never reaches the peer
    try (Table table = utility1.getConnection().getTable(TABLE)) {
      Put put = new Put(ROW);
      put.addColumn(NORMAL_CF, QUALIFIER, VALUE);
      table.put(put);
    }
    try (Table peerTable = utility2.getConnection().getTable(TABLE)) {
      for (int i = 0; i < NB_RETRIES; i++) {
        Result result = peerTable.get(new Get(ROW).addColumn(NORMAL_CF, QUALIFIER));
        if (result != null && !result.isEmpty()) {
          fail("Edit should have been stuck behind the deleted column family, but value is "
            + Bytes.toString(result.getValue(NORMAL_CF, QUALIFIER)));
        } else {
          LOG.info("Row not replicated, let's wait a bit more...");
          Thread.sleep(SLEEP_TIME);
        }
      }
    }
  }

  private TableDescriptor createTableDescriptor(byte[]... cfs) {
    return TableDescriptorBuilder.newBuilder(TABLE).setColumnFamilies(Arrays.stream(cfs).map(
      cf -> ColumnFamilyDescriptorBuilder.newBuilder(cf).setScope(REPLICATION_SCOPE_GLOBAL).build())
      .collect(Collectors.toList())).build();
  }

  private void deleteCf(Admin admin) throws IOException {
    // Modify the table so that only NORMAL_CF remains, effectively deleting DROPPED_CF
    TableDescriptor desc = createTableDescriptor(NORMAL_CF);
    admin.modifyTable(desc);
  }
}