/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.client;

import static org.apache.hadoop.hbase.HConstants.EMPTY_START_ROW;
import static org.apache.hadoop.hbase.HConstants.HBASE_CLIENT_META_OPERATION_TIMEOUT;
import static org.apache.hadoop.hbase.coprocessor.CoprocessorHost.REGION_COPROCESSOR_CONF_KEY;
import static org.hamcrest.CoreMatchers.instanceOf;
import static org.hamcrest.MatcherAssert.assertThat;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;

import java.io.IOException;
import java.util.Optional;
import java.util.concurrent.CompletionException;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicReference;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseClassTestRule;
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.HRegionLocation;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.TableNotFoundException;
import org.apache.hadoop.hbase.coprocessor.ObserverContext;
import org.apache.hadoop.hbase.coprocessor.RegionCoprocessor;
import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
import org.apache.hadoop.hbase.coprocessor.RegionObserver;
import org.apache.hadoop.hbase.exceptions.TimeoutIOException;
import org.apache.hadoop.hbase.security.User;
import org.apache.hadoop.hbase.testclassification.ClientTests;
import org.apache.hadoop.hbase.testclassification.MediumTests;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.Threads;
import org.junit.After;
import org.junit.AfterClass;
import org.junit.BeforeClass;
import org.junit.ClassRule;
import org.junit.Test;
import org.junit.experimental.categories.Category;

import org.apache.hbase.thirdparty.com.google.common.io.Closeables;

/**
 * Tests for {@link AsyncRegionLocator}, covering operation timeouts and exception unwrapping.
 */
@Category({ MediumTests.class, ClientTests.class })
public class TestAsyncRegionLocator {

  @ClassRule
  public static final HBaseClassTestRule CLASS_RULE =
    HBaseClassTestRule.forClass(TestAsyncRegionLocator.class);

  private static final HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();

  private static TableName TABLE_NAME = TableName.valueOf("async");

  private static byte[] FAMILY = Bytes.toBytes("cf");

  private static AsyncConnectionImpl CONN;

  private static AsyncRegionLocator LOCATOR;

  private static volatile long SLEEP_MS = 0L;

  /**
   * Region coprocessor that sleeps for {@link #SLEEP_MS} before opening a scanner, so scans
   * against meta (and hence region location lookups) can be slowed down on demand.
   */
  public static class SleepRegionObserver implements RegionCoprocessor, RegionObserver {

    @Override
    public Optional<RegionObserver> getRegionObserver() {
      return Optional.of(this);
    }

    @Override
    public void preScannerOpen(ObserverContext<RegionCoprocessorEnvironment> e, Scan scan)
      throws IOException {
      if (SLEEP_MS > 0) {
        Threads.sleepWithoutInterrupt(SLEEP_MS);
      }
    }
  }

  @BeforeClass
  public static void setUp() throws Exception {
    Configuration conf = TEST_UTIL.getConfiguration();
    conf.set(REGION_COPROCESSOR_CONF_KEY, SleepRegionObserver.class.getName());
    conf.setLong(HBASE_CLIENT_META_OPERATION_TIMEOUT, 2000);
    TEST_UTIL.startMiniCluster(1);
    TEST_UTIL.createTable(TABLE_NAME, FAMILY);
    TEST_UTIL.waitTableAvailable(TABLE_NAME);
    ConnectionRegistry registry =
      ConnectionRegistryFactory.getRegistry(TEST_UTIL.getConfiguration(), User.getCurrent());
    CONN = new AsyncConnectionImpl(TEST_UTIL.getConfiguration(), registry,
      registry.getClusterId().get(), User.getCurrent());
    LOCATOR = CONN.getLocator();
  }

  @AfterClass
  public static void tearDown() throws Exception {
    Closeables.close(CONN, true);
    TEST_UTIL.shutdownMiniCluster();
  }

  @After
  public void tearDownAfterTest() {
    LOCATOR.clearCache();
  }

  @Test
  public void testTimeout() throws InterruptedException, ExecutionException {
    SLEEP_MS = 1000;
    long startNs = System.nanoTime();
    try {
      LOCATOR.getRegionLocation(TABLE_NAME, EMPTY_START_ROW, RegionLocateType.CURRENT,
        TimeUnit.MILLISECONDS.toNanos(500)).get();
      fail();
    } catch (ExecutionException e) {
      assertThat(e.getCause(), instanceOf(TimeoutIOException.class));
    }
    long costMs = TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - startNs);
    assertTrue(costMs >= 500);
    assertTrue(costMs < 1000);
    // wait for the background task to finish
    Thread.sleep(2000);
    // Now the location should be in cache, so we will not visit meta again.
    HRegionLocation loc = LOCATOR.getRegionLocation(TABLE_NAME, EMPTY_START_ROW,
      RegionLocateType.CURRENT, TimeUnit.MILLISECONDS.toNanos(500)).get();
    assertEquals(loc.getServerName(),
      TEST_UTIL.getHBaseCluster().getRegionServer(0).getServerName());
  }

  @Test
  public void testNoCompletionException() {
    // make sure that we do not get a CompletionException
    SLEEP_MS = 0;
    AtomicReference<Throwable> errorHolder = new AtomicReference<>();
    try {
      LOCATOR.getRegionLocation(TableName.valueOf("NotExist"), EMPTY_START_ROW,
        RegionLocateType.CURRENT, TimeUnit.SECONDS.toNanos(1))
        .whenComplete((r, e) -> errorHolder.set(e)).join();
      fail();
    } catch (CompletionException e) {
      // join will throw a CompletionException, which is OK
      assertThat(e.getCause(), instanceOf(TableNotFoundException.class));
    }
    // but we need to make sure that we do not get a CompletionException in the callback
    assertThat(errorHolder.get(), instanceOf(TableNotFoundException.class));
  }
}