/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.regionserver;

import static org.apache.hadoop.hbase.HBaseTestingUtility.fam1;
import static org.junit.Assert.fail;
import static org.mockito.Mockito.spy;

import java.io.IOException;
import java.lang.reflect.Field;
import java.util.concurrent.BlockingQueue;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.CompatibilitySingletonFactory;
import org.apache.hadoop.hbase.HBaseClassTestRule;
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.RegionInfo;
import org.apache.hadoop.hbase.client.RegionInfoBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.testclassification.LargeTests;
import org.apache.hadoop.hbase.testclassification.RegionServerTests;
import org.apache.hadoop.hbase.util.EnvironmentEdgeManagerTestHelper;
import org.apache.hadoop.hbase.util.TableDescriptorChecker;
import org.apache.hadoop.metrics2.MetricsExecutor;
import org.junit.AfterClass;
import org.junit.Assert;
import org.junit.BeforeClass;
import org.junit.ClassRule;
import org.junit.Test;
import org.junit.experimental.categories.Category;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@Category({ RegionServerTests.class, LargeTests.class })
public class TestOpenRegionFailedMemoryLeak {

  @ClassRule
  public static final HBaseClassTestRule CLASS_RULE =
    HBaseClassTestRule.forClass(TestOpenRegionFailedMemoryLeak.class);

  private static final Logger LOG = LoggerFactory.getLogger(TestOpenRegionFailedMemoryLeak.class);

  private static final HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();

  @BeforeClass
  public static void startCluster() throws Exception {
    Configuration conf = TEST_UTIL.getConfiguration();

    // Enable table sanity checks so the invalid coprocessor configured below is rejected
    conf.setBoolean(TableDescriptorChecker.TABLE_SANITY_CHECKS, true);
  }

  @AfterClass
  public static void tearDown() throws IOException {
    EnvironmentEdgeManagerTestHelper.reset();
    LOG.info("Cleaning test directory: {}", TEST_UTIL.getDataTestDir());
    TEST_UTIL.cleanupTestDir();
  }
  // Make sure the region is successfully closed when the coprocessor config is wrong
  @Test
  public void testOpenRegionFailedMemoryLeak() throws Exception {
    final ServerName serverName = ServerName.valueOf("testOpenRegionFailed", 100, 42);
    final RegionServerServices rss = spy(TEST_UTIL.createMockRegionServerService(serverName));

    TableDescriptor htd =
      TableDescriptorBuilder.newBuilder(TableName.valueOf("testOpenRegionFailed"))
        .setColumnFamily(ColumnFamilyDescriptorBuilder.of(fam1))
        .setValue("COPROCESSOR$1", "hdfs://test/test.jar|test||").build();

    RegionInfo hri = RegionInfoBuilder.newBuilder(htd.getTableName()).build();
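    // Grab the shared metrics executor so its work queue can be inspected after the failed opens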
    ScheduledExecutorService executor =
      CompatibilitySingletonFactory.getInstance(MetricsExecutor.class).getExecutor();
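    // Every open attempt must fail; a region that failed to open but was never closed would
    // leave its periodic metrics task behind in the executor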
    for (int i = 0; i < 20; i++) {
      try {
        HRegion.openHRegion(hri, htd, rss.getWAL(hri), TEST_UTIL.getConfiguration(), rss, null);
        fail("Region open should have failed due to the invalid coprocessor configuration");
      } catch (Throwable t) {
        LOG.info("Expected exception, continuing", t);
      }
    }
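    // Sleep for one metrics period so the executor's queue settles before it is inspected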
    TimeUnit.SECONDS.sleep(MetricsRegionWrapperImpl.PERIOD);
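    // Reach into the executor via reflection and count the tasks left in its work queue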
    Field[] fields = ThreadPoolExecutor.class.getDeclaredFields();
    boolean found = false;
    for (Field field : fields) {
      if (field.getName().equals("workQueue")) {
        field.setAccessible(true);
        @SuppressWarnings("unchecked")
        BlockingQueue<Runnable> workQueue = (BlockingQueue<Runnable>) field.get(executor);
        // Two periodic tasks remain uncancelled by design and do not cause a memory leak;
        // anything more means a region that failed to open was not closed
        Assert.assertEquals("ScheduledExecutor#workQueue should contain exactly 2 tasks; "
          + "a larger queue means a failed region was not closed", 2, workQueue.size());
        found = true;
      }
    }
    Assert.assertTrue("workQueue field not found in ThreadPoolExecutor, cannot verify the queue size",
      found);
  }

}