/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.regionserver;

import static org.apache.hadoop.hbase.HBaseTestingUtil.fam1;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;

import java.io.IOException;
import org.apache.hadoop.hbase.HBaseClassTestRule;
import org.apache.hadoop.hbase.HBaseTestingUtil;
import org.apache.hadoop.hbase.SingleProcessHBaseCluster;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Durability;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.RowMutations;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.testclassification.MediumTests;
import org.apache.hadoop.hbase.testclassification.RegionServerTests;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
import org.junit.After;
import org.junit.AfterClass;
import org.junit.Before;
import org.junit.BeforeClass;
import org.junit.ClassRule;
import org.junit.Test;
import org.junit.experimental.categories.Category;

@Category({ RegionServerTests.class, MediumTests.class })
public class TestMutateRowsRecovery {

  @ClassRule
  public static final HBaseClassTestRule CLASS_RULE =
    HBaseClassTestRule.forClass(TestMutateRowsRecovery.class);

  private SingleProcessHBaseCluster cluster = null;
  private Connection connection = null;
  private static final int NB_SERVERS = 3;

  static final byte[] qual1 = Bytes.toBytes("qual1");
  static final byte[] qual2 = Bytes.toBytes("qual2");
  static final byte[] value1 = Bytes.toBytes("value1");
  static final byte[] value2 = Bytes.toBytes("value2");
  static final byte[] row1 = Bytes.toBytes("rowA");
  static final byte[] row2 = Bytes.toBytes("rowB");

  static final HBaseTestingUtil TESTING_UTIL = new HBaseTestingUtil();

  @BeforeClass
  public static void before() throws Exception {
    TESTING_UTIL.startMiniCluster(NB_SERVERS);
  }

  @AfterClass
  public static void after() throws Exception {
    TESTING_UTIL.shutdownMiniCluster();
  }

  @Before
  public void setup() throws IOException {
    TESTING_UTIL.ensureSomeNonStoppedRegionServersAvailable(NB_SERVERS);
    this.connection = ConnectionFactory.createConnection(TESTING_UTIL.getConfiguration());
    this.cluster = TESTING_UTIL.getMiniHBaseCluster();
  }

  @After
  public void tearDown() throws IOException {
    if (this.connection != null) {
      this.connection.close();
    }
  }

  @Test
  public void MutateRowsAndCheckPostKill() throws IOException, InterruptedException {
    final TableName tableName = TableName.valueOf("test");
    Admin admin = null;
    Table hTable = null;
    try {
      admin = connection.getAdmin();
      hTable = connection.getTable(tableName);
      TableDescriptor tableDescriptor = TableDescriptorBuilder.newBuilder(tableName)
        .setColumnFamily(ColumnFamilyDescriptorBuilder.of(fam1)).build();
      admin.createTable(tableDescriptor);

      // Add a multi
      RowMutations rm = new RowMutations(row1);
      Put p1 = new Put(row1);
      p1.addColumn(fam1, qual1, value1);
      p1.setDurability(Durability.SYNC_WAL);
      rm.add(p1);
      hTable.mutateRow(rm);

      // Add a put
      Put p2 = new Put(row1);
      p2.addColumn(fam1, qual2, value2);
      p2.setDurability(Durability.SYNC_WAL);
      hTable.put(p2);

      HRegionServer rs1 = TESTING_UTIL.getRSForFirstRegionInTable(tableName);
      long now = EnvironmentEdgeManager.currentTime();
      // Send the RS Load to ensure correct lastflushedseqid for stores
      rs1.tryRegionServerReport(now - 30000, now);
      // Kill the RS to trigger wal replay
      cluster.killRegionServer(rs1.getServerName());

      // Ensure correct data exists
      Get g1 = new Get(row1);
      Result result = hTable.get(g1);
      assertTrue(result.getValue(fam1, qual1) != null);
      assertEquals(0, Bytes.compareTo(result.getValue(fam1, qual1), value1));
      assertTrue(result.getValue(fam1, qual2) != null);
      assertEquals(0, Bytes.compareTo(result.getValue(fam1, qual2), value2));
    } finally {
      if (admin != null) {
        admin.close();
      }
      if (hTable != null) {
        hTable.close();
      }
    }
  }
}