/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.client;

import static org.junit.Assert.assertArrayEquals;
import static org.junit.Assert.assertTrue;

import java.util.Arrays;
import java.util.List;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseClassTestRule;
import org.apache.hadoop.hbase.HBaseTestingUtil;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HRegionLocation;
import org.apache.hadoop.hbase.MetaTableAccessor;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.regionserver.StorefileRefresherChore;
import org.apache.hadoop.hbase.testclassification.MediumTests;
import org.apache.hadoop.hbase.testclassification.MiscTests;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.zookeeper.ZKUtil;
import org.apache.hadoop.hbase.zookeeper.ZKWatcher;
import org.apache.hadoop.hbase.zookeeper.ZNodePaths;
import org.junit.BeforeClass;
import org.junit.ClassRule;
import org.junit.Test;
import org.junit.experimental.categories.Category;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;

@Category({ MiscTests.class, MediumTests.class })
public class TestMetaWithReplicasShutdownHandling extends MetaWithReplicasTestBase {

  @ClassRule
  public static final HBaseClassTestRule CLASS_RULE =
    HBaseClassTestRule.forClass(TestMetaWithReplicasShutdownHandling.class);

  private static final Logger LOG =
    LoggerFactory.getLogger(TestMetaWithReplicasShutdownHandling.class);

  @BeforeClass
  public static void setUp() throws Exception {
    startCluster();
  }

  @Test
  public void testShutdownHandling() throws Exception {
    // This test creates a table, flushes the meta (with 3 replicas), and kills the
    // server holding the primary meta replica. Then it does a put/get into/from
    // the test table. The put/get operations would use the meta replicas to locate
    // the test table's region.
    shutdownMetaAndDoValidations(TEST_UTIL);
  }

  public static void shutdownMetaAndDoValidations(HBaseTestingUtil util) throws Exception {
    // This test creates a table, flushes the meta (with 3 replicas), and kills the
    // server holding the primary meta replica. Then it does a put/get into/from
    // the test table. The put/get operations would use the meta replicas to locate
    // the test table's region.
    ZKWatcher zkw = util.getZooKeeperWatcher();
    Configuration conf = util.getConfiguration();
    conf.setBoolean(HConstants.USE_META_REPLICAS, true);

    String baseZNode =
      conf.get(HConstants.ZOOKEEPER_ZNODE_PARENT, HConstants.DEFAULT_ZOOKEEPER_ZNODE_PARENT);
    String primaryMetaZnode =
      ZNodePaths.joinZNode(baseZNode, conf.get("zookeeper.znode.metaserver", "meta-region-server"));
    byte[] data = ZKUtil.getData(zkw, primaryMetaZnode);
    ServerName primary = ProtobufUtil.toServerName(data);
    LOG.info("Primary=" + primary.toString());

    TableName TABLE = TableName.valueOf("testShutdownHandling");
    byte[][] FAMILIES = new byte[][] { Bytes.toBytes("foo") };
    if (util.getAdmin().tableExists(TABLE)) {
      util.getAdmin().disableTable(TABLE);
      util.getAdmin().deleteTable(TABLE);
    }
    byte[] row = Bytes.toBytes("test");
    ServerName master = null;
    try (Connection c = ConnectionFactory.createConnection(util.getConfiguration())) {
      try (Table htable = util.createTable(TABLE, FAMILIES)) {
        util.getAdmin().flush(TableName.META_TABLE_NAME);
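        // Secondary meta replicas only pick up newly flushed store files when the
        // StorefileRefresherChore runs, so the sleep below (several refresh periods,
        // presumably with a generous margin) gives the replicas time to see the
        // flushed hbase:meta data before region locations are read from them.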
        Thread.sleep(
          conf.getInt(StorefileRefresherChore.REGIONSERVER_STOREFILE_REFRESH_PERIOD, 30000) * 6);
        List<RegionInfo> regions = MetaTableAccessor.getTableRegions(c, TABLE);
        HRegionLocation hrl = MetaTableAccessor.getRegionLocation(c, regions.get(0));
        // Ensure that the primary server for the test table is not the same one as the
        // primary of the meta region, since we will be killing the server holding the
        // meta's primary. We want to be able to write to the test table even when the
        // meta is not present. If the servers are the same, then move the test table's
        // region out of that server to another random server.
        if (hrl.getServerName().equals(primary)) {
          util.getAdmin().move(hrl.getRegion().getEncodedNameAsBytes());
          // wait for the move to complete
          do {
            Thread.sleep(10);
            hrl = MetaTableAccessor.getRegionLocation(c, regions.get(0));
          } while (primary.equals(hrl.getServerName()));
          util.getAdmin().flush(TableName.META_TABLE_NAME);
          Thread.sleep(
            conf.getInt(StorefileRefresherChore.REGIONSERVER_STOREFILE_REFRESH_PERIOD, 30000) * 3);
        }
        // Ensure all metas are not on the same hbase:meta replica=0 server!

        master = util.getHBaseClusterInterface().getClusterMetrics().getMasterName();
        // kill the master so that regionserver recovery is not triggered at all
        // for the meta server
        LOG.info("Stopping master=" + master.toString());
        util.getHBaseClusterInterface().stopMaster(master);
        util.getHBaseClusterInterface().waitForMasterToStop(master, 60000);
        LOG.info("Master " + master + " stopped!");
        if (!master.equals(primary)) {
          util.getHBaseClusterInterface().killRegionServer(primary);
          util.getHBaseClusterInterface().waitForRegionServerToStop(primary, 60000);
        }
        c.clearRegionLocationCache();
      }
      LOG.info("Running GETs");
      try (Table htable = c.getTable(TABLE)) {
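        // At this point neither the active master nor the server that hosted the primary
        // meta replica is running, so the region lookup for the put/get below should be
        // served by one of the remaining hbase:meta replicas.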
        Put put = new Put(row);
        put.addColumn(Bytes.toBytes("foo"), row, row);
        BufferedMutator m = c.getBufferedMutator(TABLE);
        m.mutate(put);
        m.flush();
        // Try to do a get of the row that was just put
        Result r = htable.get(new Get(row));
        assertTrue(Arrays.equals(r.getRow(), row));
        // now start back the killed servers and disable use of replicas. That would mean
        // calls go to the primary
        LOG.info("Starting Master");
        util.getHBaseClusterInterface().startMaster(master.getHostname(), 0);
        util.getHBaseClusterInterface().startRegionServer(primary.getHostname(), 0);
        util.getHBaseClusterInterface().waitForActiveAndReadyMaster();
        LOG.info("Master active!");
        c.clearRegionLocationCache();
      }
    }
    conf.setBoolean(HConstants.USE_META_REPLICAS, false);
    LOG.info("Running GETs no replicas");
    try (Connection c = ConnectionFactory.createConnection(conf);
      Table htable = c.getTable(TABLE)) {
      Result r = htable.get(new Get(row));
      assertArrayEquals(row, r.getRow());
    }
  }
}