/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.util;

import static org.junit.Assert.assertEquals;

import java.util.concurrent.ScheduledThreadPoolExecutor;
import java.util.concurrent.SynchronousQueue;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseClassTestRule;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ClusterConnection;
import org.apache.hadoop.hbase.coprocessor.CoprocessorHost;
import org.apache.hadoop.hbase.io.hfile.TestHFile;
import org.apache.hadoop.hbase.master.assignment.AssignmentManager;
import org.apache.hadoop.hbase.mob.MobUtils;
import org.apache.hadoop.hbase.testclassification.MediumTests;
import org.apache.hadoop.hbase.testclassification.MiscTests;
import org.apache.hadoop.hbase.util.hbck.HFileCorruptionChecker;
import org.apache.hadoop.hbase.util.hbck.HbckTestingUtil;
import org.junit.AfterClass;
import org.junit.Before;
import org.junit.BeforeClass;
import org.junit.ClassRule;
import org.junit.Test;
import org.junit.experimental.categories.Category;

import org.apache.hbase.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder;

@Category({ MiscTests.class, MediumTests.class })
public class TestHBaseFsckMOB extends BaseTestHBaseFsck {

  @ClassRule
  public static final HBaseClassTestRule CLASS_RULE =
    HBaseClassTestRule.forClass(TestHBaseFsckMOB.class);

  @BeforeClass
  public static void setUpBeforeClass() throws Exception {
    TEST_UTIL.getConfiguration().set(CoprocessorHost.MASTER_COPROCESSOR_CONF_KEY,
      MasterSyncCoprocessor.class.getName());

    conf.setInt("hbase.regionserver.handler.count", 2);
    conf.setInt("hbase.regionserver.metahandler.count", 30);

    conf.setInt("hbase.htable.threads.max", POOL_SIZE);
    conf.setInt("hbase.hconnection.threads.max", 2 * POOL_SIZE);
    conf.setInt("hbase.hbck.close.timeout", 2 * REGION_ONLINE_TIMEOUT);
    conf.setInt(HConstants.HBASE_RPC_TIMEOUT_KEY, 8 * REGION_ONLINE_TIMEOUT);
    TEST_UTIL.startMiniCluster(1);

    tableExecutorService = new ThreadPoolExecutor(1, POOL_SIZE, 60, TimeUnit.SECONDS,
      new SynchronousQueue<>(), new ThreadFactoryBuilder().setNameFormat("testhbck-pool-%d")
        .setDaemon(true).setUncaughtExceptionHandler(Threads.LOGGING_EXCEPTION_HANDLER).build());

    hbfsckExecutorService = new ScheduledThreadPoolExecutor(POOL_SIZE);

    AssignmentManager assignmentManager =
      TEST_UTIL.getHBaseCluster().getMaster().getAssignmentManager();
    regionStates = assignmentManager.getRegionStates();

    connection = (ClusterConnection) TEST_UTIL.getConnection();

    admin = connection.getAdmin();
    admin.setBalancerRunning(false, true);

    TEST_UTIL.waitUntilAllRegionsAssigned(TableName.META_TABLE_NAME);
    TEST_UTIL.waitUntilAllRegionsAssigned(TableName.NAMESPACE_TABLE_NAME);
  }

  @AfterClass
  public static void tearDownAfterClass() throws Exception {
    tableExecutorService.shutdown();
    hbfsckExecutorService.shutdown();
    admin.close();
    TEST_UTIL.shutdownMiniCluster();
  }

  @Before
  public void setUp() {
    EnvironmentEdgeManager.reset();
  }

  /**
   * This creates a table and then corrupts a mob file. Hbck should quarantine the file.
   */
  @SuppressWarnings("deprecation")
  @Test
  public void testQuarantineCorruptMobFile() throws Exception {
    TableName table = TableName.valueOf(name.getMethodName());
    try {
      setupMobTable(table);
      assertEquals(ROWKEYS.length, countRows());
      admin.flush(table);

      FileSystem fs = FileSystem.get(conf);
      Path mobFile = getFlushedMobFile(fs, table);
      admin.disableTable(table);
      // create new corrupt mob file.
      String corruptMobFile = createMobFileName(mobFile.getName());
      Path corrupt = new Path(mobFile.getParent(), corruptMobFile);
      TestHFile.truncateFile(fs, mobFile, corrupt);
      LOG.info("Created corrupted mob file " + corrupt);
      HBaseFsck.debugLsr(conf, CommonFSUtils.getRootDir(conf));
      HBaseFsck.debugLsr(conf, MobUtils.getMobHome(conf));

      // A corrupt mob file doesn't abort the start of regions, so we can enable the table.
      admin.enableTable(table);
      HBaseFsck res = HbckTestingUtil.doHFileQuarantine(conf, table);
      assertEquals(0, res.getRetCode());
      HFileCorruptionChecker hfcc = res.getHFilecorruptionChecker();
      assertEquals(4, hfcc.getHFilesChecked());
      assertEquals(0, hfcc.getCorrupted().size());
      assertEquals(0, hfcc.getFailures().size());
      assertEquals(0, hfcc.getQuarantined().size());
      assertEquals(0, hfcc.getMissing().size());
      assertEquals(5, hfcc.getMobFilesChecked());
      assertEquals(1, hfcc.getCorruptedMobFiles().size());
      assertEquals(0, hfcc.getFailureMobFiles().size());
      assertEquals(1, hfcc.getQuarantinedMobFiles().size());
      assertEquals(0, hfcc.getMissedMobFiles().size());
      String quarantinedMobFile = hfcc.getQuarantinedMobFiles().iterator().next().getName();
      assertEquals(corruptMobFile, quarantinedMobFile);
    } finally {
      cleanupTable(table);
    }
  }
}