/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.regionserver;

import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertTrue;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;

import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseClassTestRule;
import org.apache.hadoop.hbase.HBaseTestingUtil;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.regionserver.storefiletracker.StoreFileTrackerFactory;
import org.apache.hadoop.hbase.testclassification.MediumTests;
import org.apache.hadoop.hbase.testclassification.RegionServerTests;
import org.apache.hadoop.hbase.util.Bytes;
import org.junit.After;
import org.junit.Before;
import org.junit.ClassRule;
import org.junit.Test;
import org.junit.experimental.categories.Category;

/**
 * Tests that {@link BrokenStoreFileCleaner} removes leftover junk files from store directories
 * while leaving live store files and compacted-away files intact.
 */
@Category({ MediumTests.class, RegionServerTests.class })
public class TestBrokenStoreFileCleaner {

  @ClassRule
  public static final HBaseClassTestRule CLASS_RULE =
    HBaseClassTestRule.forClass(TestBrokenStoreFileCleaner.class);

  private final HBaseTestingUtil testUtil = new HBaseTestingUtil();
  private final static byte[] fam = Bytes.toBytes("cf_1");
  private final static byte[] qual1 = Bytes.toBytes("qf_1");
  private final static byte[] val = Bytes.toBytes("val");
  private final static String junkFileName = "409fad9a751c4e8c86d7f32581bdc156";
  TableName tableName;

  @Before
  public void setUp() throws Exception {
    testUtil.getConfiguration().set(StoreFileTrackerFactory.TRACKER_IMPL,
      "org.apache.hadoop.hbase.regionserver.storefiletracker.FileBasedStoreFileTracker");
    testUtil.getConfiguration().set(BrokenStoreFileCleaner.BROKEN_STOREFILE_CLEANER_ENABLED,
      "true");
    testUtil.getConfiguration().set(BrokenStoreFileCleaner.BROKEN_STOREFILE_CLEANER_TTL, "0");
    testUtil.getConfiguration().set(BrokenStoreFileCleaner.BROKEN_STOREFILE_CLEANER_PERIOD,
      "15000000");
    testUtil.getConfiguration().set(BrokenStoreFileCleaner.BROKEN_STOREFILE_CLEANER_DELAY, "0");
    testUtil.startMiniCluster(1);
  }

  @After
  public void tearDown() throws Exception {
    testUtil.shutdownMiniCluster();
  }

  @Test
  public void testDeletingJunkFile() throws Exception {
    tableName = TableName.valueOf(getClass().getSimpleName() + "testDeletingJunkFile");
    createTableWithData(tableName);

    HRegion region = testUtil.getMiniHBaseCluster().getRegions(tableName).get(0);
    ServerName sn = testUtil.getMiniHBaseCluster().getServerHoldingRegion(tableName,
      region.getRegionInfo().getRegionName());
    HRegionServer rs = testUtil.getMiniHBaseCluster().getRegionServer(sn);
    BrokenStoreFileCleaner cleaner = rs.getBrokenStoreFileCleaner();

    // create junk file
    HStore store = region.getStore(fam);
    Path cfPath = store.getRegionFileSystem().getStoreDir(store.getColumnFamilyName());
    Path junkFilePath = new Path(cfPath, junkFileName);

    FSDataOutputStream junkFileOS = store.getFileSystem().create(junkFilePath);
    junkFileOS.writeUTF("hello");
    junkFileOS.close();

    int storeFiles = store.getStorefilesCount();
    assertTrue(storeFiles > 0);

    // verify the file exists before the chore and is missing afterwards
    assertTrue(store.getFileSystem().exists(junkFilePath));
    cleaner.chore();
    assertFalse(store.getFileSystem().exists(junkFilePath));

    // verify no storefile got deleted
    int currentStoreFiles = store.getStorefilesCount();
    assertEquals(storeFiles, currentStoreFiles);
  }

  @Test
  public void testSkippingCompactedFiles() throws Exception {
    tableName = TableName.valueOf(getClass().getSimpleName() + "testSkippingCompactedFiles");
    createTableWithData(tableName);

    HRegion region = testUtil.getMiniHBaseCluster().getRegions(tableName).get(0);

    ServerName sn = testUtil.getMiniHBaseCluster().getServerHoldingRegion(tableName,
      region.getRegionInfo().getRegionName());
    HRegionServer rs = testUtil.getMiniHBaseCluster().getRegionServer(sn);
    BrokenStoreFileCleaner cleaner = rs.getBrokenStoreFileCleaner();

    // run major compaction to generate compacted files
    region.compact(true);

    // make sure there are compacted files
    HStore store = region.getStore(fam);
    int compactedFiles = store.getCompactedFilesCount();
    assertTrue(compactedFiles > 0);

    cleaner.chore();

    // verify none of the compacted files were deleted
    int existingCompactedFiles = store.getCompactedFilesCount();
    assertEquals(compactedFiles, existingCompactedFiles);

    // verify adding a junk file does not break anything
    Path cfPath = store.getRegionFileSystem().getStoreDir(store.getColumnFamilyName());
    Path junkFilePath = new Path(cfPath, junkFileName);

    FSDataOutputStream junkFileOS = store.getFileSystem().create(junkFilePath);
    junkFileOS.writeUTF("hello");
    junkFileOS.close();

    assertTrue(store.getFileSystem().exists(junkFilePath));
    cleaner.setEnabled(true);
    cleaner.chore();
    assertFalse(store.getFileSystem().exists(junkFilePath));

    // verify compacted files are still intact
    existingCompactedFiles = store.getCompactedFilesCount();
    assertEquals(compactedFiles, existingCompactedFiles);
  }

  @Test
  public void testJunkFileTTL() throws Exception {
    tableName = TableName.valueOf(getClass().getSimpleName() + "testJunkFileTTL");
    createTableWithData(tableName);

    HRegion region = testUtil.getMiniHBaseCluster().getRegions(tableName).get(0);
    ServerName sn = testUtil.getMiniHBaseCluster().getServerHoldingRegion(tableName,
      region.getRegionInfo().getRegionName());
    HRegionServer rs = testUtil.getMiniHBaseCluster().getRegionServer(sn);

    // create junk file
    HStore store = region.getStore(fam);
    Path cfPath = store.getRegionFileSystem().getStoreDir(store.getColumnFamilyName());
    Path junkFilePath = new Path(cfPath, junkFileName);

    FSDataOutputStream junkFileOS = store.getFileSystem().create(junkFilePath);
    junkFileOS.writeUTF("hello");
    junkFileOS.close();

    int storeFiles = store.getStorefilesCount();
    assertTrue(storeFiles > 0);

    // verify the file exists before the chore
    assertTrue(store.getFileSystem().exists(junkFilePath));

    // set a 5 sec TTL
    rs.getConfiguration().set(BrokenStoreFileCleaner.BROKEN_STOREFILE_CLEANER_TTL, "5000");
    BrokenStoreFileCleaner cleaner =
      new BrokenStoreFileCleaner(15000000, 0, rs, rs.getConfiguration(), rs);
    cleaner.chore();
    // the file is younger than the TTL, so it is still present after the chore run
    assertTrue(store.getFileSystem().exists(junkFilePath));
    Thread.sleep(5000);
    cleaner.chore();
    assertFalse(store.getFileSystem().exists(junkFilePath));

    // verify no storefile got deleted
    int currentStoreFiles = store.getStorefilesCount();
    assertEquals(storeFiles, currentStoreFiles);
  }

  @Test
  public void testWhenRegionIsClosing() throws Exception {
    tableName = TableName.valueOf(getClass().getSimpleName() + "testWhenRegionIsClosing");
    createTableWithData(tableName);

    HRegion region = testUtil.getMiniHBaseCluster().getRegions(tableName).get(0);
    ServerName sn = testUtil.getMiniHBaseCluster().getServerHoldingRegion(tableName,
      region.getRegionInfo().getRegionName());
    HRegionServer rs = testUtil.getMiniHBaseCluster().getRegionServer(sn);

    HStore store = region.getStore(fam);
    int expectedStoreFiles = store.getStorefilesCount();
    assertTrue(expectedStoreFiles > 0);
    Path cfPath = store.getRegionFileSystem().getStoreDir(store.getColumnFamilyName());
    // because we use the FILE based store file tracker (SFT), there will be a .filelist dir
    // under the store dir
    int totalFiles = store.getRegionFileSystem().getFileSystem().listStatus(cfPath).length - 1;
    assertEquals(expectedStoreFiles, totalFiles);

    HRegionServer mockedServer = mock(HRegionServer.class);
    HRegion mockedRegion = mock(HRegion.class);
    when(mockedRegion.isAvailable()).thenReturn(region.isAvailable());
    when(mockedRegion.getRegionFileSystem()).thenReturn(region.getRegionFileSystem());
    List<HRegion> mockedRegionsList = new ArrayList<>();
    mockedRegionsList.add(mockedRegion);
    when(mockedServer.getRegions()).thenReturn(mockedRegionsList);
    when(mockedServer.getServerName()).thenReturn(rs.getServerName());
    // simulate the region closing while the cleaner is running by closing it
    // when the cleaner first accesses its stores
    when(mockedRegion.getStores()).thenAnswer(i -> {
      region.close();
      return region.getStores();
    });

    BrokenStoreFileCleaner cleaner =
      new BrokenStoreFileCleaner(15000000, 0, rs, rs.getConfiguration(), mockedServer);

    cleaner.chore();

    // verify no storefile got deleted
    int currentStoreFiles =
      store.getRegionFileSystem().getFileSystem().listStatus(cfPath).length - 1;
    assertEquals(expectedStoreFiles, currentStoreFiles);
  }

  private Table createTableWithData(TableName tableName) throws IOException {
    Table table = testUtil.createTable(tableName, fam);
    try {
      for (int i = 1; i < 10; i++) {
        Put p = new Put(Bytes.toBytes("row" + i));
        p.addColumn(fam, qual1, val);
        table.put(p);
      }
      // flush them
      testUtil.getAdmin().flush(tableName);
      for (int i = 11; i < 20; i++) {
        Put p = new Put(Bytes.toBytes("row" + i));
        p.addColumn(fam, qual1, val);
        table.put(p);
      }
      // flush them
      testUtil.getAdmin().flush(tableName);
      for (int i = 21; i < 30; i++) {
        Put p = new Put(Bytes.toBytes("row" + i));
        p.addColumn(fam, qual1, val);
        table.put(p);
      }
      // flush them
      testUtil.getAdmin().flush(tableName);
    } catch (IOException e) {
      table.close();
      throw e;
    }
    return table;
  }
}