/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.io.hfile;

import static org.apache.hadoop.hbase.HConstants.BUCKET_CACHE_IOENGINE_KEY;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;

import java.io.File;
import java.util.List;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseClassTestRule;
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.MiniHBaseCluster;
import org.apache.hadoop.hbase.StartMiniClusterOption;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.regionserver.HRegionServer;
import org.apache.hadoop.hbase.regionserver.HStoreFile;
import org.apache.hadoop.hbase.testclassification.IOTests;
import org.apache.hadoop.hbase.testclassification.LargeTests;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.zookeeper.MiniZooKeeperCluster;
import org.junit.After;
import org.junit.Before;
import org.junit.ClassRule;
import org.junit.Test;
import org.junit.experimental.categories.Category;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

@Category({ IOTests.class, LargeTests.class })
public class TestPrefetchRSClose {

  @ClassRule
  public static final HBaseClassTestRule CLASS_RULE =
    HBaseClassTestRule.forClass(TestPrefetchRSClose.class);

  private static final Logger LOG = LoggerFactory.getLogger(TestPrefetchRSClose.class);

  private static final HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();

  private Configuration conf;
  Path testDir;
  MiniZooKeeperCluster zkCluster;
  MiniHBaseCluster cluster;
  StartMiniClusterOption option = StartMiniClusterOption.builder().numRegionServers(1).build();

  @Before
  public void setup() throws Exception {
    conf = TEST_UTIL.getConfiguration();
    testDir = TEST_UTIL.getDataTestDir();
    TEST_UTIL.getTestFileSystem().mkdirs(testDir);

    conf.setBoolean(CacheConfig.PREFETCH_BLOCKS_ON_OPEN_KEY, true);
    conf.set(BUCKET_CACHE_IOENGINE_KEY, "file:" + testDir + "/bucket.cache");
    conf.setInt("hbase.bucketcache.size", 400);
    conf.set("hbase.bucketcache.persistent.path", testDir + "/bucket.persistence");
    zkCluster = TEST_UTIL.startMiniZKCluster();
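    // Start a single-RegionServer mini cluster so every region (and its cached store files)
    // is served by regionserver 0, which the test stops later on.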
    cluster = TEST_UTIL.startMiniHBaseCluster(option);
    cluster.setConf(conf);
  }

  @Test
  public void testPrefetchPersistence() throws Exception {

    // Write to table and flush
    TableName tableName = TableName.valueOf("table1");
    byte[] row0 = Bytes.toBytes("row1");
    byte[] row1 = Bytes.toBytes("row2");
    byte[] family = Bytes.toBytes("family");
    byte[] qf1 = Bytes.toBytes("qf1");
    byte[] qf2 = Bytes.toBytes("qf2");
    byte[] value1 = Bytes.toBytes("value1");
    byte[] value2 = Bytes.toBytes("value2");

    TableDescriptor td = TableDescriptorBuilder.newBuilder(tableName)
      .setColumnFamily(ColumnFamilyDescriptorBuilder.of(family)).build();
    Table table = TEST_UTIL.createTable(td, null);
    try {
      // put data
      Put put0 = new Put(row0);
      put0.addColumn(family, qf1, 1, value1);
      table.put(put0);
      Put put1 = new Put(row1);
      put1.addColumn(family, qf2, 1, value2);
      table.put(put1);
      TEST_UTIL.flush(tableName);
    } finally {
      Thread.sleep(2000);
    }

    // Default interval for cache persistence is 1000ms. So after 1000ms, both the persistence
    // files should exist.

    HRegionServer regionServingRS = cluster.getRegionServer(0);

    Admin admin = TEST_UTIL.getAdmin();
    List<String> cachedFilesList = admin.getCachedFilesList(regionServingRS.getServerName());
    assertEquals(1, cachedFilesList.size());
    for (HStoreFile h : regionServingRS.getRegions().get(0).getStores().get(0).getStorefiles()) {
      assertTrue(cachedFilesList.contains(h.getPath().getName()));
    }

    // Stop the RS
    cluster.stopRegionServer(0);
    LOG.info("Stopped Region Server 0.");
    Thread.sleep(1000);
    assertTrue(new File(testDir + "/bucket.persistence").exists());
  }

  @After
  public void tearDown() throws Exception {
    TEST_UTIL.shutdownMiniCluster();
    TEST_UTIL.cleanupDataTestDirOnTestFS(String.valueOf(testDir));
    if (zkCluster != null) {
      zkCluster.shutdown();
    }
  }
}