/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.regionserver;

import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertTrue;

import java.util.Collection;
import org.apache.hadoop.hbase.HBaseClassTestRule;
import org.apache.hadoop.hbase.HBaseTestingUtil;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Delete;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner;
import org.apache.hadoop.hbase.regionserver.compactions.CompactionConfiguration;
import org.apache.hadoop.hbase.testclassification.MediumTests;
import org.apache.hadoop.hbase.util.Bytes;
import org.junit.AfterClass;
import org.junit.BeforeClass;
import org.junit.ClassRule;
import org.junit.Test;
import org.junit.experimental.categories.Category;

/**
 * Verifies that compacted store files are cleaned up when a region is closed, and that no
 * deleted data resurfaces after the region is reopened — even while an open scanner still holds
 * references to the pre-compaction store file readers.
 */
@Category({ MediumTests.class })
public class TestCleanupCompactedFileOnRegionClose {

  @ClassRule
  public static final HBaseClassTestRule CLASS_RULE =
    HBaseClassTestRule.forClass(TestCleanupCompactedFileOnRegionClose.class);

  private static HBaseTestingUtil util;

  @BeforeClass
  public static void beforeClass() throws Exception {
    util = new HBaseTestingUtil();
    // Raise the compaction-min threshold so flushes never trigger an automatic compaction;
    // the test drives the major compaction explicitly via util.compact().
    util.getConfiguration().setInt(CompactionConfiguration.HBASE_HSTORE_COMPACTION_MIN_KEY, 100);
    util.getConfiguration().set("dfs.blocksize", "64000");
    util.getConfiguration().set("dfs.namenode.fs-limits.min-block-size", "1024");
    // TTL of 0 lets the HFile cleaner archive/delete compacted files immediately.
    util.getConfiguration().set(TimeToLiveHFileCleaner.TTL_CONF_KEY, "0");
    util.startMiniCluster(2);
  }

  @AfterClass
  public static void afterclass() throws Exception {
    util.shutdownMiniCluster();
  }

  /**
   * Flushes several store files, opens a scanner to pin StoreFileReader references, major
   * compacts, then unassigns/reassigns the region to check that the compacted files are cleaned
   * up on close and the deleted row stays deleted after reopen.
   */
  @Test
  public void testCleanupOnClose() throws Exception {
    TableName tableName = TableName.valueOf("testCleanupOnClose");
    String familyName = "f";
    byte[] familyNameBytes = Bytes.toBytes(familyName);
    util.createTable(tableName, familyName);

    Admin hBaseAdmin = util.getAdmin();
    Table table = util.getConnection().getTable(tableName);
    // The scanner must stay open across the region close below — that is what the test
    // exercises — so it is only released in the finally block, together with the table.
    // (Previously both were leaked.)
    ResultScanner scanner = null;
    try {
      HRegionServer rs = util.getRSForFirstRegionInTable(tableName);
      Region region = rs.getRegions(tableName).get(0);

      int refSFCount = 4;
      for (int i = 0; i < refSFCount; i++) {
        for (int j = 0; j < refSFCount; j++) {
          Put put = new Put(Bytes.toBytes(j));
          put.addColumn(familyNameBytes, Bytes.toBytes(i), Bytes.toBytes(j));
          table.put(put);
        }
        util.flush(tableName);
      }
      assertEquals(refSFCount, region.getStoreFileList(new byte[][] { familyNameBytes }).size());

      // add a delete, to test whether we end up with an inconsistency post region close
      Delete delete = new Delete(Bytes.toBytes(refSFCount - 1));
      table.delete(delete);
      util.flush(tableName);
      assertFalse(table.exists(new Get(Bytes.toBytes(refSFCount - 1))));

      // Create a scanner and keep it open to add references to StoreFileReaders
      Scan scan = new Scan();
      scan.withStopRow(Bytes.toBytes(refSFCount - 2));
      scan.setCaching(1);
      scanner = table.getScanner(scan);
      Result res = scanner.next();
      assertNotNull(res);
      assertEquals(refSFCount, res.getFamilyMap(familyNameBytes).size());

      // Verify the references: the files written before the delete are pinned by the scanner.
      int count = 0;
      for (HStoreFile sf : (Collection<HStoreFile>) region.getStore(familyNameBytes)
        .getStorefiles()) {
        synchronized (sf) {
          if (count < refSFCount) {
            assertTrue(sf.isReferencedInReads());
          } else {
            assertFalse(sf.isReferencedInReads());
          }
        }
        count++;
      }

      // Major compact to produce compacted storefiles that need to be cleaned up
      util.compact(tableName, true);
      assertEquals(1, region.getStoreFileList(new byte[][] { familyNameBytes }).size());
      assertEquals(refSFCount + 1, ((HStore) region.getStore(familyNameBytes)).getStoreEngine()
        .getStoreFileManager().getCompactedfiles().size());

      // close then open the region to determine whether compacted storefiles get cleaned up on
      // close
      hBaseAdmin.unassign(region.getRegionInfo().getRegionName(), false);
      hBaseAdmin.assign(region.getRegionInfo().getRegionName());
      util.waitUntilNoRegionsInTransition(10000);

      assertFalse("Deleted row should not exist",
        table.exists(new Get(Bytes.toBytes(refSFCount - 1))));

      // Re-fetch the region after reassignment; all compacted files must be gone.
      rs = util.getRSForFirstRegionInTable(tableName);
      region = rs.getRegions(tableName).get(0);
      assertEquals(1, region.getStoreFileList(new byte[][] { familyNameBytes }).size());
      assertEquals(0, ((HStore) region.getStore(familyNameBytes)).getStoreEngine()
        .getStoreFileManager().getCompactedfiles().size());
    } finally {
      // Fix for resource leak: release the scanner's reader references and the table.
      if (scanner != null) {
        scanner.close();
      }
      table.close();
    }
  }
}