/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.regionserver;

import java.io.IOException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseTestingUtil;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.RegionInfo;
import org.apache.hadoop.hbase.client.RegionInfoBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.regionserver.compactions.CompactionConfiguration;
import org.apache.hadoop.hbase.regionserver.compactions.CompactionRequestImpl;
import org.apache.hadoop.hbase.regionserver.compactions.RatioBasedCompactionPolicy;
import org.apache.hadoop.hbase.regionserver.storefiletracker.StoreFileTrackerForTest;
import org.apache.hadoop.hbase.regionserver.wal.FSHLog;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.CommonFSUtils;
import org.junit.After;
import org.junit.Assert;
import org.junit.Before;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import org.apache.hbase.thirdparty.com.google.common.collect.Lists;

/**
 * Harness for compaction policy tests: stands up a single {@link HStore} backed by mock store
 * files so subclasses can exercise compaction selection against known file sizes.
 */
public class TestCompactionPolicy {
  private final static Logger LOG = LoggerFactory.getLogger(TestCompactionPolicy.class);
  protected final static HBaseTestingUtil TEST_UTIL = new HBaseTestingUtil();

  protected Configuration conf;
  protected HStore store;
  private static final String DIR =
    TEST_UTIL.getDataTestDir(TestCompactionPolicy.class.getSimpleName()).toString();
  protected static Path TEST_FILE;
  protected static final int minFiles = 3;
  protected static final int maxFiles = 5;

  protected static final long minSize = 10;
  protected static final long maxSize = 2100;

  private FSHLog hlog;
  private HRegion region;

  @Before
  public void setUp() throws Exception {
    config();
    initialize();
  }

  /**
   * Set up the config values the store needs.
   */
  protected void config() {
    this.conf = TEST_UTIL.getConfiguration();
    this.conf.setLong(HConstants.MAJOR_COMPACTION_PERIOD, 0);
    this.conf.setInt(CompactionConfiguration.HBASE_HSTORE_COMPACTION_MIN_KEY, minFiles);
    this.conf.setInt(CompactionConfiguration.HBASE_HSTORE_COMPACTION_MAX_KEY, maxFiles);
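    // Size bounds for ratio-based selection: files smaller than minSize are always eligible for
    // minor compaction, files larger than maxSize are excluded from it.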
    this.conf.setLong(CompactionConfiguration.HBASE_HSTORE_COMPACTION_MIN_SIZE_KEY, minSize);
    this.conf.setLong(CompactionConfiguration.HBASE_HSTORE_COMPACTION_MAX_SIZE_KEY, maxSize);
    this.conf.setFloat(CompactionConfiguration.HBASE_HSTORE_COMPACTION_RATIO_KEY, 1.0F);
  }

  /**
   * Set up a store over a fresh region and WAL.
   * @throws IOException if region or store creation fails
   */
  protected void initialize() throws IOException {
    Path basedir = new Path(DIR);
    String logName = "logs";
    Path logdir = new Path(DIR, logName);
    ColumnFamilyDescriptor familyDescriptor =
      ColumnFamilyDescriptorBuilder.of(Bytes.toBytes("family"));
    FileSystem fs = FileSystem.get(conf);

    fs.delete(logdir, true);

    TableDescriptor tableDescriptor =
      TableDescriptorBuilder.newBuilder(TableName.valueOf(Bytes.toBytes("table")))
        .setColumnFamily(familyDescriptor).build();
    RegionInfo info = RegionInfoBuilder.newBuilder(tableDescriptor.getTableName()).build();

    hlog = new FSHLog(fs, basedir, logName, conf);
    hlog.init();
    ChunkCreator.initialize(MemStoreLAB.CHUNK_SIZE_DEFAULT, false, 0, 0, 0, null,
      MemStoreLAB.INDEX_CHUNK_SIZE_PERCENTAGE_DEFAULT);
    region = HRegion.createHRegion(info, basedir, conf, tableDescriptor, hlog);
    region.close();
    Path tableDir = CommonFSUtils.getTableDir(basedir, tableDescriptor.getTableName());
    region = new HRegion(tableDir, hlog, fs, conf, info, tableDescriptor, null);

    store = new HStore(region, familyDescriptor, conf, false);

    TEST_FILE = region.getRegionFileSystem().createTempName();
    fs.createNewFile(TEST_FILE);
  }

  @After
  public void tearDown() throws IOException {
    IOException ex = null;
    try {
      region.close();
    } catch (IOException e) {
      LOG.warn("Caught Exception", e);
      ex = e;
    }
    try {
      hlog.close();
    } catch (IOException e) {
      LOG.warn("Caught Exception", e);
      ex = e;
    }
    if (ex != null) {
      throw ex;
    }
  }

  /** Boxes a varargs list of longs into an {@link ArrayList}. */
  ArrayList<Long> toArrayList(long... numbers) {
    ArrayList<Long> result = new ArrayList<>();
    for (long i : numbers) {
      result.add(i);
    }
    return result;
  }

  /** Creates mock store files of the given sizes, each with zero age on disk. */
  List<HStoreFile> sfCreate(long... sizes) throws IOException {
    ArrayList<Long> ageInDisk = new ArrayList<>();
    for (int i = 0; i < sizes.length; i++) {
      ageInDisk.add(0L);
    }
    return sfCreate(toArrayList(sizes), ageInDisk);
  }

  List<HStoreFile> sfCreate(ArrayList<Long> sizes, ArrayList<Long> ageInDisk) throws IOException {
    return sfCreate(false, sizes, ageInDisk);
  }

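  /** Same as {@link #sfCreate(long...)} but lets callers flag the mock files as references. */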
  List<HStoreFile> sfCreate(boolean isReference, long... sizes) throws IOException {
    ArrayList<Long> ageInDisk = new ArrayList<>(sizes.length);
    for (int i = 0; i < sizes.length; i++) {
      ageInDisk.add(0L);
    }
    return sfCreate(isReference, toArrayList(sizes), ageInDisk);
  }

  List<HStoreFile> sfCreate(boolean isReference, ArrayList<Long> sizes, ArrayList<Long> ageInDisk)
    throws IOException {
    List<HStoreFile> ret = Lists.newArrayList();
    StoreFileTrackerForTest storeFileTrackerForTest =
      new StoreFileTrackerForTest(conf, true, store.getStoreContext());
    for (int i = 0; i < sizes.size(); i++) {
      ret.add(new MockHStoreFile(TEST_UTIL, TEST_FILE, sizes.get(i), ageInDisk.get(i), isReference,
        i, storeFileTrackerForTest));
    }
    return ret;
  }

  /** Returns the on-disk sizes (reader lengths) of the given store files. */
  long[] getSizes(List<HStoreFile> sfList) {
    long[] aNums = new long[sfList.size()];
    for (int i = 0; i < sfList.size(); ++i) {
      aNums[i] = sfList.get(i).getReader().length();
    }
    return aNums;
  }

  void compactEquals(List<HStoreFile> candidates, long... expected) throws IOException {
    compactEquals(candidates, false, false, expected);
  }

  void compactEquals(List<HStoreFile> candidates, boolean forcemajor, long... expected)
    throws IOException {
    compactEquals(candidates, forcemajor, false, expected);
  }

  /**
   * Runs compaction selection over the candidate files and asserts that the selected files have
   * exactly the expected sizes, in order.
   */
  void compactEquals(List<HStoreFile> candidates, boolean forcemajor, boolean isOffPeak,
    long... expected) throws IOException {
    store.forceMajor = forcemajor;
    // Test default compactions
    CompactionRequestImpl result =
      ((RatioBasedCompactionPolicy) store.storeEngine.getCompactionPolicy())
        .selectCompaction(candidates, new ArrayList<>(), false, isOffPeak, forcemajor);
    List<HStoreFile> actual = new ArrayList<>(result.getFiles());
    if (isOffPeak && !forcemajor) {
      Assert.assertTrue(result.isOffPeak());
    }
    Assert.assertEquals(Arrays.toString(expected), Arrays.toString(getSizes(actual)));
    store.forceMajor = false;
  }
}