/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.regionserver;

import static org.junit.Assert.assertEquals;
import static org.junit.Assert.fail;

import java.io.IOException;
import java.util.List;
import java.util.concurrent.ThreadPoolExecutor;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseClassTestRule;
import org.apache.hadoop.hbase.HBaseTestingUtil;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.RegionInfo;
import org.apache.hadoop.hbase.client.RegionInfoBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.executor.ExecutorType;
import org.apache.hadoop.hbase.testclassification.MediumTests;
import org.apache.hadoop.hbase.testclassification.RegionServerTests;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.CommonFSUtils;
import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
import org.junit.AfterClass;
import org.junit.BeforeClass;
import org.junit.ClassRule;
import org.junit.Rule;
import org.junit.Test;
import org.junit.experimental.categories.Category;
import org.junit.rules.TestName;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

@Category({ MediumTests.class, RegionServerTests.class })
public class TestRegionOpen {

  @ClassRule
  public static final HBaseClassTestRule CLASS_RULE =
    HBaseClassTestRule.forClass(TestRegionOpen.class);

  private static final Logger LOG = LoggerFactory.getLogger(TestRegionOpen.class);
  private static final int NB_SERVERS = 1;

  private static final HBaseTestingUtil HTU = new HBaseTestingUtil();

  @Rule
  public TestName name = new TestName();

  @BeforeClass
  public static void before() throws Exception {
    HTU.startMiniCluster(NB_SERVERS);
  }

  @AfterClass
  public static void afterClass() throws Exception {
    HTU.shutdownMiniCluster();
  }

  private static HRegionServer getRS() {
    return HTU.getHBaseCluster().getLiveRegionServerThreads().get(0).getRegionServer();
  }

  @Test
  public void testPriorityRegionIsOpenedWithSeparateThreadPool() throws Exception {
    final TableName tableName =
      TableName.valueOf(TestRegionOpen.class.getSimpleName());
    ThreadPoolExecutor exec =
      getRS().getExecutorService().getExecutorThreadPool(ExecutorType.RS_OPEN_PRIORITY_REGION);
    long completed = exec.getCompletedTaskCount();

    // Creating a table flagged with HIGH_QOS should route its region open through the
    // RS_OPEN_PRIORITY_REGION pool rather than the regular region-open pool.
    TableDescriptor tableDescriptor =
      TableDescriptorBuilder.newBuilder(tableName).setPriority(HConstants.HIGH_QOS)
        .setColumnFamily(ColumnFamilyDescriptorBuilder.of(HConstants.CATALOG_FAMILY)).build();
    try (Connection connection = ConnectionFactory.createConnection(HTU.getConfiguration());
      Admin admin = connection.getAdmin()) {
      admin.createTable(tableDescriptor);
    }

    // The priority pool should have completed exactly one more task: the region open.
    assertEquals(completed + 1, exec.getCompletedTaskCount());
  }

  @Test
  public void testNonExistentRegionReplica() throws Exception {
    final TableName tableName = TableName.valueOf(name.getMethodName());
    final byte[] FAMILYNAME = Bytes.toBytes("fam");
    FileSystem fs = HTU.getTestFileSystem();
    Admin admin = HTU.getAdmin();
    Configuration conf = HTU.getConfiguration();
    Path rootDir = HTU.getDataTestDirOnTestFS();

    TableDescriptor htd = TableDescriptorBuilder.newBuilder(tableName)
      .setColumnFamily(ColumnFamilyDescriptorBuilder.of(FAMILYNAME)).build();
    admin.createTable(htd);
    HTU.waitUntilNoRegionsInTransition(60000);

    // Create a new RegionInfo with a non-default region replica id
    RegionInfo hri = RegionInfoBuilder.newBuilder(htd.getTableName())
      .setStartKey(Bytes.toBytes("A")).setEndKey(Bytes.toBytes("B"))
      .setRegionId(EnvironmentEdgeManager.currentTime()).setReplicaId(2).build();
    HRegionFileSystem regionFs = HRegionFileSystem.createRegionOnFileSystem(conf, fs,
      CommonFSUtils.getTableDir(rootDir, hri.getTable()), hri);
    Path regionDir = regionFs.getRegionDir();
    try {
      HRegionFileSystem.loadRegionInfoFileContent(fs, regionDir);
    } catch (IOException e) {
      LOG.info("Caught expected IOE due to missing .regioninfo file: " + e.getMessage()
        + ", skipping region open.");
      // We should only have 1 region online
      List<RegionInfo> regions = admin.getRegions(tableName);
      LOG.info("Regions: " + regions);
      if (regions.size() != 1) {
        fail("Table " + tableName + " should have only one region, but got: " + regions);
      }
      return;
    }
    fail("Should have thrown IOE when attempting to open a non-existent region.");
  }
}