001/* 002 * Licensed to the Apache Software Foundation (ASF) under one 003 * or more contributor license agreements. See the NOTICE file 004 * distributed with this work for additional information 005 * regarding copyright ownership. The ASF licenses this file 006 * to you under the Apache License, Version 2.0 (the 007 * "License"); you may not use this file except in compliance 008 * with the License. You may obtain a copy of the License at 009 * 010 * http://www.apache.org/licenses/LICENSE-2.0 011 * 012 * Unless required by applicable law or agreed to in writing, software 013 * distributed under the License is distributed on an "AS IS" BASIS, 014 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 015 * See the License for the specific language governing permissions and 016 * limitations under the License. 017 */ 018package org.apache.hadoop.hbase.coprocessor; 019 020import static org.junit.Assert.assertEquals; 021import static org.junit.Assert.assertNull; 022 023import java.io.IOException; 024import java.util.Collections; 025import java.util.List; 026import java.util.Optional; 027import java.util.concurrent.CountDownLatch; 028import org.apache.hadoop.conf.Configuration; 029import org.apache.hadoop.fs.FileSystem; 030import org.apache.hadoop.fs.Path; 031import org.apache.hadoop.hbase.Cell; 032import org.apache.hadoop.hbase.Coprocessor; 033import org.apache.hadoop.hbase.ExtendedCell; 034import org.apache.hadoop.hbase.HBaseClassTestRule; 035import org.apache.hadoop.hbase.HBaseTestingUtil; 036import org.apache.hadoop.hbase.HConstants; 037import org.apache.hadoop.hbase.TableName; 038import org.apache.hadoop.hbase.client.Admin; 039import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder; 040import org.apache.hadoop.hbase.client.CoprocessorDescriptorBuilder; 041import org.apache.hadoop.hbase.client.Get; 042import org.apache.hadoop.hbase.client.Put; 043import org.apache.hadoop.hbase.client.RegionInfo; 044import org.apache.hadoop.hbase.client.RegionInfoBuilder; 045import org.apache.hadoop.hbase.client.Result; 046import org.apache.hadoop.hbase.client.Scan; 047import org.apache.hadoop.hbase.client.Table; 048import org.apache.hadoop.hbase.client.TableDescriptor; 049import org.apache.hadoop.hbase.client.TableDescriptorBuilder; 050import org.apache.hadoop.hbase.filter.FilterBase; 051import org.apache.hadoop.hbase.regionserver.ChunkCreator; 052import org.apache.hadoop.hbase.regionserver.FlushLifeCycleTracker; 053import org.apache.hadoop.hbase.regionserver.HRegion; 054import org.apache.hadoop.hbase.regionserver.HRegionServer; 055import org.apache.hadoop.hbase.regionserver.HStore; 056import org.apache.hadoop.hbase.regionserver.InternalScanner; 057import org.apache.hadoop.hbase.regionserver.MemStoreLAB; 058import org.apache.hadoop.hbase.regionserver.Region; 059import org.apache.hadoop.hbase.regionserver.RegionCoprocessorHost; 060import org.apache.hadoop.hbase.regionserver.RegionServerServices; 061import org.apache.hadoop.hbase.regionserver.ScanType; 062import org.apache.hadoop.hbase.regionserver.ScannerContext; 063import org.apache.hadoop.hbase.regionserver.Store; 064import org.apache.hadoop.hbase.regionserver.StoreScanner; 065import org.apache.hadoop.hbase.regionserver.compactions.CompactionContext; 066import org.apache.hadoop.hbase.regionserver.compactions.CompactionLifeCycleTracker; 067import org.apache.hadoop.hbase.regionserver.compactions.CompactionRequest; 068import org.apache.hadoop.hbase.regionserver.throttle.ThroughputController; 069import org.apache.hadoop.hbase.security.User; 070import org.apache.hadoop.hbase.testclassification.CoprocessorTests; 071import org.apache.hadoop.hbase.testclassification.MediumTests; 072import org.apache.hadoop.hbase.util.Bytes; 073import org.apache.hadoop.hbase.wal.WAL; 074import org.junit.ClassRule; 075import org.junit.Rule; 076import org.junit.Test; 077import org.junit.experimental.categories.Category; 078import org.junit.rules.TestName; 079 080@Category({ CoprocessorTests.class, MediumTests.class }) 081public class TestRegionObserverScannerOpenHook { 082 083 @ClassRule 084 public static final HBaseClassTestRule CLASS_RULE = 085 HBaseClassTestRule.forClass(TestRegionObserverScannerOpenHook.class); 086 087 private static HBaseTestingUtil UTIL = new HBaseTestingUtil(); 088 static final Path DIR = UTIL.getDataTestDir(); 089 090 @Rule 091 public TestName name = new TestName(); 092 093 public static class NoDataFilter extends FilterBase { 094 095 @Override 096 public ReturnCode filterCell(final Cell ignored) { 097 return ReturnCode.SKIP; 098 } 099 100 @Override 101 public boolean filterAllRemaining() throws IOException { 102 return true; 103 } 104 105 @Override 106 public boolean filterRow() throws IOException { 107 return true; 108 } 109 } 110 111 /** 112 * Do the default logic in {@link RegionObserver} interface. 113 */ 114 public static class EmptyRegionObsever implements RegionCoprocessor, RegionObserver { 115 @Override 116 public Optional<RegionObserver> getRegionObserver() { 117 return Optional.of(this); 118 } 119 } 120 121 /** 122 * Don't return any data from a scan by creating a custom {@link StoreScanner}. 123 */ 124 public static class NoDataFromScan implements RegionCoprocessor, RegionObserver { 125 @Override 126 public Optional<RegionObserver> getRegionObserver() { 127 return Optional.of(this); 128 } 129 130 @Override 131 public void preGetOp(ObserverContext<? extends RegionCoprocessorEnvironment> c, Get get, 132 List<Cell> result) throws IOException { 133 c.bypass(); 134 } 135 136 @Override 137 public void preScannerOpen(ObserverContext<? extends RegionCoprocessorEnvironment> c, Scan scan) 138 throws IOException { 139 scan.setFilter(new NoDataFilter()); 140 } 141 } 142 143 private static final InternalScanner NO_DATA = new InternalScanner() { 144 145 @Override 146 public boolean next(List<? super ExtendedCell> result, ScannerContext scannerContext) 147 throws IOException { 148 return false; 149 } 150 151 @Override 152 public void close() throws IOException { 153 } 154 }; 155 156 /** 157 * Don't allow any data in a flush by creating a custom {@link StoreScanner}. 158 */ 159 public static class NoDataFromFlush implements RegionCoprocessor, RegionObserver { 160 @Override 161 public Optional<RegionObserver> getRegionObserver() { 162 return Optional.of(this); 163 } 164 165 @Override 166 public InternalScanner preFlush(ObserverContext<? extends RegionCoprocessorEnvironment> c, 167 Store store, InternalScanner scanner, FlushLifeCycleTracker tracker) throws IOException { 168 return NO_DATA; 169 } 170 } 171 172 /** 173 * Don't allow any data to be written out in the compaction by creating a custom 174 * {@link StoreScanner}. 175 */ 176 public static class NoDataFromCompaction implements RegionCoprocessor, RegionObserver { 177 @Override 178 public Optional<RegionObserver> getRegionObserver() { 179 return Optional.of(this); 180 } 181 182 @Override 183 public InternalScanner preCompact(ObserverContext<? extends RegionCoprocessorEnvironment> c, 184 Store store, InternalScanner scanner, ScanType scanType, CompactionLifeCycleTracker tracker, 185 CompactionRequest request) throws IOException { 186 return NO_DATA; 187 } 188 } 189 190 HRegion initHRegion(byte[] tableName, String callingMethod, Configuration conf, 191 byte[]... families) throws IOException { 192 TableDescriptorBuilder builder = 193 TableDescriptorBuilder.newBuilder(TableName.valueOf(tableName)); 194 for (byte[] family : families) { 195 builder.setColumnFamily(ColumnFamilyDescriptorBuilder.of(family)); 196 } 197 TableDescriptor tableDescriptor = builder.build(); 198 ChunkCreator.initialize(MemStoreLAB.CHUNK_SIZE_DEFAULT, false, 0, 0, 0, null, 199 MemStoreLAB.INDEX_CHUNK_SIZE_PERCENTAGE_DEFAULT); 200 RegionInfo info = RegionInfoBuilder.newBuilder(tableDescriptor.getTableName()).build(); 201 Path path = new Path(DIR + callingMethod); 202 WAL wal = HBaseTestingUtil.createWal(conf, path, info); 203 HRegion r = HRegion.createHRegion(info, path, conf, tableDescriptor, wal); 204 // this following piece is a hack. currently a coprocessorHost 205 // is secretly loaded at OpenRegionHandler. we don't really 206 // start a region server here, so just manually create cphost 207 // and set it to region. 208 RegionCoprocessorHost host = new RegionCoprocessorHost(r, null, conf); 209 r.setCoprocessorHost(host); 210 return r; 211 } 212 213 @Test 214 public void testRegionObserverScanTimeStacking() throws Exception { 215 byte[] ROW = Bytes.toBytes("testRow"); 216 byte[] TABLE = Bytes.toBytes(getClass().getName()); 217 byte[] A = Bytes.toBytes("A"); 218 byte[][] FAMILIES = new byte[][] { A }; 219 220 // Use new HTU to not overlap with the DFS cluster started in #CompactionStacking 221 Configuration conf = new HBaseTestingUtil().getConfiguration(); 222 HRegion region = initHRegion(TABLE, getClass().getName(), conf, FAMILIES); 223 RegionCoprocessorHost h = region.getCoprocessorHost(); 224 h.load(NoDataFromScan.class, Coprocessor.PRIORITY_HIGHEST, conf); 225 h.load(EmptyRegionObsever.class, Coprocessor.PRIORITY_USER, conf); 226 227 Put put = new Put(ROW); 228 put.addColumn(A, A, A); 229 region.put(put); 230 231 Get get = new Get(ROW); 232 Result r = region.get(get); 233 assertNull( 234 "Got an unexpected number of rows - " 235 + "no data should be returned with the NoDataFromScan coprocessor. Found: " + r, 236 r.listCells()); 237 HBaseTestingUtil.closeRegionAndWAL(region); 238 } 239 240 @Test 241 public void testRegionObserverFlushTimeStacking() throws Exception { 242 byte[] ROW = Bytes.toBytes("testRow"); 243 byte[] TABLE = Bytes.toBytes(getClass().getName()); 244 byte[] A = Bytes.toBytes("A"); 245 byte[][] FAMILIES = new byte[][] { A }; 246 247 // Use new HTU to not overlap with the DFS cluster started in #CompactionStacking 248 Configuration conf = new HBaseTestingUtil().getConfiguration(); 249 HRegion region = initHRegion(TABLE, getClass().getName(), conf, FAMILIES); 250 RegionCoprocessorHost h = region.getCoprocessorHost(); 251 h.load(NoDataFromFlush.class, Coprocessor.PRIORITY_HIGHEST, conf); 252 h.load(EmptyRegionObsever.class, Coprocessor.PRIORITY_USER, conf); 253 254 // put a row and flush it to disk 255 Put put = new Put(ROW); 256 put.addColumn(A, A, A); 257 region.put(put); 258 region.flush(true); 259 Get get = new Get(ROW); 260 Result r = region.get(get); 261 assertNull( 262 "Got an unexpected number of rows - " 263 + "no data should be returned with the NoDataFromScan coprocessor. Found: " + r, 264 r.listCells()); 265 HBaseTestingUtil.closeRegionAndWAL(region); 266 } 267 268 /* 269 * Custom HRegion which uses CountDownLatch to signal the completion of compaction 270 */ 271 public static class CompactionCompletionNotifyingRegion extends HRegion { 272 private static volatile CountDownLatch compactionStateChangeLatch = null; 273 274 @SuppressWarnings("deprecation") 275 public CompactionCompletionNotifyingRegion(Path tableDir, WAL log, FileSystem fs, 276 Configuration confParam, RegionInfo info, TableDescriptor htd, 277 RegionServerServices rsServices) { 278 super(tableDir, log, fs, confParam, info, htd, rsServices); 279 } 280 281 public CountDownLatch getCompactionStateChangeLatch() { 282 if (compactionStateChangeLatch == null) { 283 compactionStateChangeLatch = new CountDownLatch(1); 284 } 285 return compactionStateChangeLatch; 286 } 287 288 @Override 289 public boolean compact(CompactionContext compaction, HStore store, 290 ThroughputController throughputController) throws IOException { 291 boolean ret = super.compact(compaction, store, throughputController); 292 if (ret) { 293 compactionStateChangeLatch.countDown(); 294 } 295 return ret; 296 } 297 298 @Override 299 public boolean compact(CompactionContext compaction, HStore store, 300 ThroughputController throughputController, User user) throws IOException { 301 boolean ret = super.compact(compaction, store, throughputController, user); 302 if (ret) compactionStateChangeLatch.countDown(); 303 return ret; 304 } 305 } 306 307 /** 308 * Unfortunately, the easiest way to test this is to spin up a mini-cluster since we want to do 309 * the usual compaction mechanism on the region, rather than going through the backdoor to the 310 * region 311 */ 312 @Test 313 public void testRegionObserverCompactionTimeStacking() throws Exception { 314 // setup a mini cluster so we can do a real compaction on a region 315 Configuration conf = UTIL.getConfiguration(); 316 conf.setClass(HConstants.REGION_IMPL, CompactionCompletionNotifyingRegion.class, HRegion.class); 317 conf.setInt("hbase.hstore.compaction.min", 2); 318 UTIL.startMiniCluster(); 319 byte[] ROW = Bytes.toBytes("testRow"); 320 byte[] A = Bytes.toBytes("A"); 321 TableDescriptor tableDescriptor = 322 TableDescriptorBuilder.newBuilder(TableName.valueOf(name.getMethodName())) 323 .setColumnFamily(ColumnFamilyDescriptorBuilder.of(A)) 324 .setCoprocessor(CoprocessorDescriptorBuilder 325 .newBuilder(EmptyRegionObsever.class.getName()).setJarPath(null) 326 .setPriority(Coprocessor.PRIORITY_USER).setProperties(Collections.emptyMap()).build()) 327 .setCoprocessor(CoprocessorDescriptorBuilder 328 .newBuilder(NoDataFromCompaction.class.getName()).setJarPath(null) 329 .setPriority(Coprocessor.PRIORITY_HIGHEST).setProperties(Collections.emptyMap()).build()) 330 .build(); 331 332 Admin admin = UTIL.getAdmin(); 333 admin.createTable(tableDescriptor); 334 335 Table table = UTIL.getConnection().getTable(tableDescriptor.getTableName()); 336 337 // put a row and flush it to disk 338 Put put = new Put(ROW); 339 put.addColumn(A, A, A); 340 table.put(put); 341 342 HRegionServer rs = UTIL.getRSForFirstRegionInTable(tableDescriptor.getTableName()); 343 List<HRegion> regions = rs.getRegions(tableDescriptor.getTableName()); 344 assertEquals("More than 1 region serving test table with 1 row", 1, regions.size()); 345 Region region = regions.get(0); 346 admin.flushRegion(region.getRegionInfo().getRegionName()); 347 CountDownLatch latch = 348 ((CompactionCompletionNotifyingRegion) region).getCompactionStateChangeLatch(); 349 350 // put another row and flush that too 351 put = new Put(Bytes.toBytes("anotherrow")); 352 put.addColumn(A, A, A); 353 table.put(put); 354 admin.flushRegion(region.getRegionInfo().getRegionName()); 355 356 // run a compaction, which normally would should get rid of the data 357 // wait for the compaction checker to complete 358 latch.await(); 359 // check both rows to ensure that they aren't there 360 Get get = new Get(ROW); 361 Result r = table.get(get); 362 assertNull( 363 "Got an unexpected number of rows - " 364 + "no data should be returned with the NoDataFromScan coprocessor. Found: " + r, 365 r.listCells()); 366 367 get = new Get(Bytes.toBytes("anotherrow")); 368 r = table.get(get); 369 assertNull( 370 "Got an unexpected number of rows - " 371 + "no data should be returned with the NoDataFromScan coprocessor Found: " + r, 372 r.listCells()); 373 374 table.close(); 375 UTIL.shutdownMiniCluster(); 376 } 377}