/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.io.encoding;

import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;

import java.io.IOException;
import java.util.ArrayList;
import java.util.Collection;
import java.util.List;
import java.util.Map;
import org.apache.hadoop.hbase.ArrayBackedTag;
import org.apache.hadoop.hbase.HBaseClassTestRule;
import org.apache.hadoop.hbase.HBaseTestingUtil;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.Tag;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Durability;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.io.hfile.BlockCacheFactory;
import org.apache.hadoop.hbase.io.hfile.HFile;
import org.apache.hadoop.hbase.io.hfile.LruBlockCache;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.regionserver.HRegion;
import org.apache.hadoop.hbase.regionserver.Region;
import org.apache.hadoop.hbase.testclassification.IOTests;
import org.apache.hadoop.hbase.testclassification.LargeTests;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.LoadTestKVGenerator;
import org.apache.hadoop.hbase.util.Strings;
import org.junit.ClassRule;
import org.junit.Test;
import org.junit.experimental.categories.Category;
import org.junit.runner.RunWith;
import org.junit.runners.Parameterized;
import org.junit.runners.Parameterized.Parameters;

/**
 * Tests encoded seekers by loading and reading values.
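 * Parameterized over every {@link DataBlockEncoding}, with and without cell tags, and with and
 * without tag compression; correctness is verified while data is still in the memstore, after
 * flushes, and again after a compaction.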
 */
@Category({ IOTests.class, LargeTests.class })
@RunWith(Parameterized.class)
public class TestEncodedSeekers {

  @ClassRule
  public static final HBaseClassTestRule CLASS_RULE =
    HBaseClassTestRule.forClass(TestEncodedSeekers.class);

  private static final String TABLE_NAME = "encodedSeekersTable";
  private static final String CF_NAME = "encodedSeekersCF";
  private static final byte[] CF_BYTES = Bytes.toBytes(CF_NAME);
  private static final int MAX_VERSIONS = 5;

  private static final int BLOCK_SIZE = 64 * 1024;
  private static final int MIN_VALUE_SIZE = 30;
  private static final int MAX_VALUE_SIZE = 60;
  private static final int NUM_ROWS = 1003;
  private static final int NUM_COLS_PER_ROW = 20;
  private static final int NUM_HFILES = 4;
  private static final int NUM_ROWS_PER_FLUSH = NUM_ROWS / NUM_HFILES;

  private final HBaseTestingUtil testUtil = new HBaseTestingUtil();
  private final DataBlockEncoding encoding;
  private final boolean includeTags;
  private final boolean compressTags;

  /** Enable when debugging */
  private static final boolean VERBOSE = false;

  @Parameters
  public static Collection<Object[]> parameters() {
    List<Object[]> paramList = new ArrayList<>();
    for (DataBlockEncoding encoding : DataBlockEncoding.values()) {
      for (boolean includeTags : new boolean[] { false, true }) {
        for (boolean compressTags : new boolean[] { false, true }) {
          paramList.add(new Object[] { encoding, includeTags, compressTags });
        }
      }
    }
    return paramList;
  }

  public TestEncodedSeekers(DataBlockEncoding encoding, boolean includeTags, boolean compressTags) {
    this.encoding = encoding;
    this.includeTags = includeTags;
    this.compressTags = compressTags;
  }

  @Test
  public void testEncodedSeeker() throws IOException {
    System.err.println("Testing encoded seekers for encoding : " + encoding + ", includeTags : "
      + includeTags + ", compressTags : " + compressTags);
    if (includeTags) {
      testUtil.getConfiguration().setInt(HFile.FORMAT_VERSION_KEY, 3);
    }

    LruBlockCache cache =
      (LruBlockCache) BlockCacheFactory.createBlockCache(testUtil.getConfiguration());
    // Need to disable default row bloom filter for this test to pass.
    ColumnFamilyDescriptor cfd = ColumnFamilyDescriptorBuilder.newBuilder(CF_BYTES)
      .setMaxVersions(MAX_VERSIONS).setDataBlockEncoding(encoding).setBlocksize(BLOCK_SIZE)
      .setBloomFilterType(BloomType.NONE).setCompressTags(compressTags).build();
    HRegion region = testUtil.createTestRegion(TABLE_NAME, cfd, cache);

    // write the data, but leave some in the memstore
    doPuts(region);

    // verify correctness when memstore contains data
    doGets(region);

    // verify correctness again after compacting
    region.compact(false);
    doGets(region);

    Map<DataBlockEncoding, Integer> encodingCounts = cache.getEncodingCountsForTest();

    // Ensure that compactions don't pollute the cache with unencoded blocks
    // in case of in-cache-only encoding.
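    // Exactly one encoding must be present in the cache, it must match the configured
    // encoding, and at least one block must have been cached with it.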
    System.err.println("encodingCounts=" + encodingCounts);
    assertEquals(1, encodingCounts.size());
    DataBlockEncoding encodingInCache = encodingCounts.keySet().iterator().next();
    assertEquals(encoding, encodingInCache);
    assertTrue(encodingCounts.get(encodingInCache) > 0);
  }

  /**
   * Writes {@link #NUM_ROWS} rows of {@link #NUM_COLS_PER_ROW} columns each, flushing
   * periodically so the data is spread across several HFiles while the rows written after the
   * last flush remain in the memstore.
   */
  private void doPuts(HRegion region) throws IOException {
    LoadTestKVGenerator dataGenerator = new LoadTestKVGenerator(MIN_VALUE_SIZE, MAX_VALUE_SIZE);
    for (int i = 0; i < NUM_ROWS; ++i) {
      byte[] key = Bytes.toBytes(LoadTestKVGenerator.md5PrefixedKey(i));
      for (int j = 0; j < NUM_COLS_PER_ROW; ++j) {
        Put put = new Put(key);
        put.setDurability(Durability.ASYNC_WAL);
        byte[] col = Bytes.toBytes(String.valueOf(j));
        byte[] value = dataGenerator.generateRandomSizeValue(key, col);
        if (includeTags) {
          Tag[] tag = new Tag[1];
          tag[0] = new ArrayBackedTag((byte) 1, "Visibility");
          KeyValue kv = new KeyValue(key, CF_BYTES, col, HConstants.LATEST_TIMESTAMP, value, tag);
          put.add(kv);
        } else {
          put.addColumn(CF_BYTES, col, value);
        }
        if (VERBOSE) {
          KeyValue kvPut = new KeyValue(key, CF_BYTES, col, value);
          System.err.println(Strings.padFront(i + "", ' ', 4) + " " + kvPut);
        }
        region.put(put);
      }
      if (i % NUM_ROWS_PER_FLUSH == 0) {
        region.flush(true);
      }
    }
  }

  /**
   * Reads every cell back with a point Get and verifies the value against the deterministic
   * {@link LoadTestKVGenerator} output for that row and qualifier.
   */
  private void doGets(Region region) throws IOException {
    for (int i = 0; i < NUM_ROWS; ++i) {
      final byte[] rowKey = Bytes.toBytes(LoadTestKVGenerator.md5PrefixedKey(i));
      for (int j = 0; j < NUM_COLS_PER_ROW; ++j) {
        final String qualStr = String.valueOf(j);
        if (VERBOSE) {
          System.err.println(
            "Reading row " + i + ", column " + j + " " + Bytes.toString(rowKey) + "/" + qualStr);
        }
        final byte[] qualBytes = Bytes.toBytes(qualStr);
        Get get = new Get(rowKey);
        get.addColumn(CF_BYTES, qualBytes);
        Result result = region.get(get);
        assertEquals(1, result.size());
        byte[] value = result.getValue(CF_BYTES, qualBytes);
        assertTrue(LoadTestKVGenerator.verify(value, rowKey, qualBytes));
      }
    }
  }
}