/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.replication;

import static org.junit.Assert.fail;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseClassTestRule;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HBaseTestingUtil;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.NamespaceDescriptor;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.Waiter.Predicate;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.replication.regionserver.HBaseInterClusterReplicationEndpoint;
import org.apache.hadoop.hbase.testclassification.LargeTests;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.JVMClusterUtil;
import org.apache.hadoop.hbase.zookeeper.MiniZooKeeperCluster;
import org.junit.After;
import org.junit.AfterClass;
import org.junit.Before;
import org.junit.BeforeClass;
import org.junit.ClassRule;
import org.junit.Test;
import org.junit.experimental.categories.Category;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

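/**
 * Tests that replication does not stall when the WAL still contains edits for a table that has
 * since been dropped: with {@code REPLICATION_DROP_ON_DELETED_TABLE_KEY} enabled on the source
 * cluster, such edits are dropped and replication of the remaining tables proceeds.
 */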
@Category({ LargeTests.class })
public class TestReplicationEditsDroppedWithDroppedTable {

  @ClassRule
  public static final HBaseClassTestRule CLASS_RULE =
    HBaseClassTestRule.forClass(TestReplicationEditsDroppedWithDroppedTable.class);

  private static final Logger LOG =
    LoggerFactory.getLogger(TestReplicationEditsDroppedWithDroppedTable.class);

  private static Configuration conf1 = HBaseConfiguration.create();
  private static Configuration conf2 = HBaseConfiguration.create();

  protected static HBaseTestingUtil utility1;
  protected static HBaseTestingUtil utility2;

  private static Admin admin1;
  private static Admin admin2;

  private static final String namespace = "NS";
  private static final TableName NORMAL_TABLE = TableName.valueOf("normal-table");
  private static final TableName DROPPED_TABLE = TableName.valueOf("dropped-table");
  private static final TableName DROPPED_NS_TABLE = TableName.valueOf("NS:dropped-table");
  private static final byte[] ROW = Bytes.toBytes("row");
  private static final byte[] FAMILY = Bytes.toBytes("f");
  private static final byte[] QUALIFIER = Bytes.toBytes("q");
  private static final byte[] VALUE = Bytes.toBytes("value");

  private static final String PEER_ID = "1";
  private static final long SLEEP_TIME = 1000;
  private static final int NB_RETRIES = 10;

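  /**
   * Starts two single-node mini clusters that share one mini ZooKeeper cluster (under the znode
   * parents /1 and /2) and creates the test namespace on both of them.
   */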
  @BeforeClass
  public static void setUpBeforeClass() throws Exception {
    // Set to true so that replication edits for already-deleted tables are dropped
    conf1.setBoolean(HBaseInterClusterReplicationEndpoint.REPLICATION_DROP_ON_DELETED_TABLE_KEY,
      true);
    conf1.set(HConstants.ZOOKEEPER_ZNODE_PARENT, "/1");
    conf1.setInt("replication.source.nb.capacity", 1);
    utility1 = new HBaseTestingUtil(conf1);
    utility1.startMiniZKCluster();
    MiniZooKeeperCluster miniZK = utility1.getZkCluster();
    conf1 = utility1.getConfiguration();

    conf2 = HBaseConfiguration.create(conf1);
    conf2.set(HConstants.ZOOKEEPER_ZNODE_PARENT, "/2");
    utility2 = new HBaseTestingUtil(conf2);
    utility2.setZkCluster(miniZK);

    utility1.startMiniCluster(1);
    utility2.startMiniCluster(1);

    admin1 = utility1.getAdmin();
    admin2 = utility2.getAdmin();

    NamespaceDescriptor nsDesc = NamespaceDescriptor.create(namespace).build();
    admin1.createNamespace(nsDesc);
    admin2.createNamespace(nsDesc);
  }

  @AfterClass
  public static void tearDownAfterClass() throws Exception {
    utility2.shutdownMiniCluster();
    utility1.shutdownMiniCluster();
  }

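  /**
   * Rolls the WALs on the source cluster, re-adds the replication peer and creates the normal
   * table that each test writes to when probing whether replication is flowing.
   */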
  @Before
  public void setup() throws Exception {
    // Roll the WAL on every region server so each test starts from a fresh log
    for (JVMClusterUtil.RegionServerThread r : utility1.getHBaseCluster()
      .getRegionServerThreads()) {
      utility1.getAdmin().rollWALWriter(r.getRegionServer().getServerName());
    }
    // Add the replication peer
    ReplicationPeerConfig rpc = ReplicationPeerConfig.newBuilder()
      .setClusterKey(utility2.getRpcConnnectionURI()).setReplicateAllUserTables(true).build();
    admin1.addReplicationPeer(PEER_ID, rpc);
    // Create the table used to verify replication progress
    createTable(NORMAL_TABLE);
  }

  @After
  public void tearDown() throws Exception {
    // Remove the replication peer
    admin1.removeReplicationPeer(PEER_ID);
    // Drop the normal table on both clusters
    admin1.disableTable(NORMAL_TABLE);
    admin1.deleteTable(NORMAL_TABLE);
    admin2.disableTable(NORMAL_TABLE);
    admin2.deleteTable(NORMAL_TABLE);
  }

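  /**
   * Creates the given table on both clusters with a single column family whose replication scope
   * is global, then waits until all of its regions are assigned.
   */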
  private void createTable(TableName tableName) throws Exception {
    TableDescriptor desc =
      TableDescriptorBuilder.newBuilder(tableName).setColumnFamily(ColumnFamilyDescriptorBuilder
        .newBuilder(FAMILY).setScope(HConstants.REPLICATION_SCOPE_GLOBAL).build()).build();
    admin1.createTable(desc);
    admin2.createTable(desc);
    utility1.waitUntilAllRegionsAssigned(tableName);
    utility2.waitUntilAllRegionsAssigned(tableName);
  }

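  // The two tests below run the same scenario, once for a table in the default namespace and
  // once for a table in the custom namespace NS.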
  @Test
  public void testEditsDroppedWithDroppedTable() throws Exception {
    testWithDroppedTable(DROPPED_TABLE);
  }

  @Test
  public void testEditsDroppedWithDroppedTableNS() throws Exception {
    testWithDroppedTable(DROPPED_NS_TABLE);
  }

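  /**
   * Disables the peer, writes an edit to a table that is then dropped on both clusters, and
   * re-enables the peer: the orphaned edit must be dropped so that replication can proceed.
   */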
  private void testWithDroppedTable(TableName droppedTableName) throws Exception {
    createTable(droppedTableName);
    admin1.disableReplicationPeer(PEER_ID);

    try (Table droppedTable = utility1.getConnection().getTable(droppedTableName)) {
      Put put = new Put(ROW);
      put.addColumn(FAMILY, QUALIFIER, VALUE);
      droppedTable.put(put);
    }

    admin1.disableTable(droppedTableName);
    admin1.deleteTable(droppedTableName);
    admin2.disableTable(droppedTableName);
    admin2.deleteTable(droppedTableName);

    admin1.enableReplicationPeer(PEER_ID);

    verifyReplicationProceeded();
  }

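  /**
   * Same scenario, but the table is first dropped only on the peer cluster: replication must stay
   * stuck until the table is gone from the source cluster as well.
   */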
  @Test
  public void testEditsBehindDroppedTableTiming() throws Exception {
    createTable(DROPPED_TABLE);
    admin1.disableReplicationPeer(PEER_ID);

    try (Table droppedTable = utility1.getConnection().getTable(DROPPED_TABLE)) {
      Put put = new Put(ROW);
      put.addColumn(FAMILY, QUALIFIER, VALUE);
      droppedTable.put(put);
    }

    // Only delete the table from the peer cluster
    admin2.disableTable(DROPPED_TABLE);
    admin2.deleteTable(DROPPED_TABLE);

    admin1.enableReplicationPeer(PEER_ID);

    // The source table still exists, so replication should be stalled
    verifyReplicationStuck();
    admin1.disableTable(DROPPED_TABLE);
    // Still stuck: the source table exists even though it is disabled
    verifyReplicationStuck();
    admin1.deleteTable(DROPPED_TABLE);
    // Now the source table is gone, so replication should proceed and the offending edits
    // should be dropped
    verifyReplicationProceeded();
  }

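  /**
   * Writes a fresh edit to the normal table on the source cluster and waits until it shows up on
   * the peer cluster, proving that the replication queue is moving again.
   */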
  private void verifyReplicationProceeded() throws Exception {
    try (Table normalTable = utility1.getConnection().getTable(NORMAL_TABLE)) {
      Put put = new Put(ROW);
      put.addColumn(FAMILY, QUALIFIER, VALUE);
      normalTable.put(put);
    }
    utility2.waitFor(NB_RETRIES * SLEEP_TIME, (Predicate<Exception>) () -> {
      try (Table normalTable = utility2.getConnection().getTable(NORMAL_TABLE)) {
        Result result = normalTable.get(new Get(ROW).addColumn(FAMILY, QUALIFIER));
        return result != null && !result.isEmpty()
          && Bytes.equals(VALUE, result.getValue(FAMILY, QUALIFIER));
      }
    });
  }

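  /**
   * Writes a fresh edit to the normal table on the source cluster and asserts that it does not
   * appear on the peer cluster within the retry window, i.e. replication is still blocked.
   */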
  private void verifyReplicationStuck() throws Exception {
    try (Table normalTable = utility1.getConnection().getTable(NORMAL_TABLE)) {
      Put put = new Put(ROW);
      put.addColumn(FAMILY, QUALIFIER, VALUE);
      normalTable.put(put);
    }
    try (Table normalTable = utility2.getConnection().getTable(NORMAL_TABLE)) {
      for (int i = 0; i < NB_RETRIES; i++) {
        Result result = normalTable.get(new Get(ROW).addColumn(FAMILY, QUALIFIER));
        if (result != null && !result.isEmpty()) {
          fail("Edit should have been stuck behind the dropped table, but value is "
            + Bytes.toString(result.getValue(FAMILY, QUALIFIER)));
        } else {
          LOG.info("Row not replicated, let's wait a bit more...");
          Thread.sleep(SLEEP_TIME);
        }
      }
    }
  }
}