/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.regionserver;

import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import org.apache.hadoop.hbase.HBaseClassTestRule;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.regionserver.compactions.CompactionRequestImpl;
import org.apache.hadoop.hbase.regionserver.compactions.RatioBasedCompactionPolicy;
import org.apache.hadoop.hbase.testclassification.SmallTests;
import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
import org.apache.hadoop.hbase.util.TimeOffsetEnvironmentEdge;
import org.junit.Assert;
import org.junit.ClassRule;
import org.junit.Test;
import org.junit.experimental.categories.Category;

/**
 * Tests the default (ratio-based) compaction file selection policy.
 */
@Category(SmallTests.class)
public class TestDefaultCompactSelection extends TestCompactionPolicy {

  @ClassRule
  public static final HBaseClassTestRule CLASS_RULE =
    HBaseClassTestRule.forClass(TestDefaultCompactSelection.class);

  @Override
  protected void config() {
    super.config();
    // DON'T change this config since all test cases assume HStore.BLOCKING_STOREFILES_KEY is 10.
    this.conf.setLong(HStore.BLOCKING_STOREFILES_KEY, 10);
  }

  @Test
  public void testCompactionRatio() throws IOException {
    TimeOffsetEnvironmentEdge edge = new TimeOffsetEnvironmentEdge();
    EnvironmentEdgeManager.injectEdge(edge);
    /**
     * NOTE: these tests are specific to the implementation of the current compaction algorithm.
     * They were developed to ensure that refactoring doesn't implicitly alter it.
     */
    long tooBig = maxSize + 1;

    // default case: preserve the user ratio on size
    compactEquals(sfCreate(100, 50, 23, 12, 12), 23, 12, 12);
    // fewer files than the compaction threshold = don't compact
    compactEquals(sfCreate(100, 50, 25, 12, 12) /* empty */);
    // greater than the max compact size = skip those files
    compactEquals(sfCreate(tooBig, tooBig, 700, 700, 700), 700, 700, 700);
    // big size + threshold
    compactEquals(sfCreate(tooBig, tooBig, 700, 700) /* empty */);
    // small files = don't care about ratio
    compactEquals(sfCreate(7, 1, 1), 7, 1, 1);

    // don't exceed the max files-per-compaction threshold
    // note: file selection starts with largest to smallest.
    compactEquals(sfCreate(7, 6, 5, 4, 3, 2, 1), 5, 4, 3, 2, 1);

    compactEquals(sfCreate(50, 10, 10, 10, 10), 10, 10, 10, 10);

    compactEquals(sfCreate(10, 10, 10, 10, 50), 10, 10, 10, 10);

    compactEquals(sfCreate(251, 253, 251, maxSize - 1), 251, 253, 251);

    compactEquals(sfCreate(maxSize - 1, maxSize - 1, maxSize - 1) /* empty */);

    // Always try to compact something to get below the blocking storefile count
    this.conf.setLong("hbase.hstore.compaction.min.size", 1);
    store.storeEngine.getCompactionPolicy().setConf(conf);
    compactEquals(sfCreate(512, 256, 128, 64, 32, 16, 8, 4, 2, 1), 4, 2, 1);
    this.conf.setLong("hbase.hstore.compaction.min.size", minSize);
    store.storeEngine.getCompactionPolicy().setConf(conf);

    /* MAJOR COMPACTION */
    // if a major compaction has been forced, then compact everything
    compactEquals(sfCreate(50, 25, 12, 12), true, 50, 25, 12, 12);
    // also choose files < threshold on major compaction
    compactEquals(sfCreate(12, 12), true, 12, 12);
    // even if one of those files is too big
    compactEquals(sfCreate(tooBig, 12, 12), true, tooBig, 12, 12);
    // don't exceed the max files-per-compaction threshold, even with major compaction
    store.forceMajor = true;
    compactEquals(sfCreate(7, 6, 5, 4, 3, 2, 1), 5, 4, 3, 2, 1);
    store.forceMajor = false;
    // if we exceed maxCompactSize, downgrade to minor
    // if not, it creates a 'snowball effect' when files >> maxCompactSize:
    // the last file in compaction is the aggregate of all previous compactions
    compactEquals(sfCreate(100, 50, 23, 12, 12), true, 23, 12, 12);
    conf.setLong(HConstants.MAJOR_COMPACTION_PERIOD, 1);
    conf.setFloat("hbase.hregion.majorcompaction.jitter", 0);
    store.storeEngine.getCompactionPolicy().setConf(conf);
    try {
      // The modTime of the mocked store file is the current time, so we need to increase the
      // timestamp a bit to make sure that now - lowestModTime is greater than the major
      // compaction period (1ms).
      // trigger an aged major compaction
      List<HStoreFile> candidates = sfCreate(50, 25, 12, 12);
      edge.increment(2);
      compactEquals(candidates, 50, 25, 12, 12);
      // make sure exceeding maxCompactSize also downgrades aged major compactions to minors
      candidates = sfCreate(100, 50, 23, 12, 12);
      edge.increment(2);
      compactEquals(candidates, 23, 12, 12);
    } finally {
      conf.setLong(HConstants.MAJOR_COMPACTION_PERIOD, 1000 * 60 * 60 * 24);
      conf.setFloat("hbase.hregion.majorcompaction.jitter", 0.20F);
    }

    /* REFERENCES == file is from a region that was split */
    // treat storefiles that have references like a major compaction
    compactEquals(sfCreate(true, 100, 50, 25, 12, 12), 100, 50, 25, 12, 12);
    // reference files shouldn't obey the max compact size threshold
    compactEquals(sfCreate(true, tooBig, 12, 12), tooBig, 12, 12);
    // reference files should obey the max files-per-compaction threshold to avoid OOM
    compactEquals(sfCreate(true, 7, 6, 5, 4, 3, 2, 1), 7, 6, 5, 4, 3);

    // empty case
    compactEquals(new ArrayList<>() /* empty */);
    // empty case (because all files are too big)
    compactEquals(sfCreate(tooBig, tooBig) /* empty */);
  }

  @Test
  public void testOffPeakCompactionRatio() throws IOException {
    /*
     * NOTE: these tests are specific to the implementation of the current compaction algorithm.
     * They were developed to ensure that refactoring doesn't implicitly alter it.
     */
    // set an off-peak compaction ratio
    this.conf.setFloat("hbase.hstore.compaction.ratio.offpeak", 5.0F);
    store.storeEngine.getCompactionPolicy().setConf(this.conf);
    // Test with and without the off-peak flag.
    compactEquals(sfCreate(999, 50, 12, 12, 1), false, true, 50, 12, 12, 1);
    compactEquals(sfCreate(999, 50, 12, 12, 1), 12, 12, 1);
  }

  @Test
  public void testStuckStoreCompaction() throws IOException {
    // Select the smallest compaction if the store is stuck.
    compactEquals(sfCreate(99, 99, 99, 99, 99, 99, 30, 30, 30, 30), 30, 30, 30);
    // If not stuck, the standard policy applies.
    compactEquals(sfCreate(99, 99, 99, 99, 99, 30, 30, 30, 30), 99, 30, 30, 30, 30);

    // Still add sufficiently small files to the compaction, though.
    compactEquals(sfCreate(99, 99, 99, 99, 99, 99, 30, 30, 30, 15), 30, 30, 30, 15);
    // Prefer the earlier compaction over the later one if the benefit is not significant.
    compactEquals(sfCreate(99, 99, 99, 99, 30, 26, 26, 29, 25, 25), 30, 26, 26);
    // Prefer the later compaction if the benefit is significant.
    compactEquals(sfCreate(99, 99, 99, 99, 27, 27, 27, 20, 20, 20), 20, 20, 20);
  }

  @Test
  public void testCompactionEmptyHFile() throws IOException {
    // Set TTL
    ScanInfo oldScanInfo = store.getScanInfo();
    ScanInfo newScanInfo =
      oldScanInfo.customize(oldScanInfo.getMaxVersions(), 600, oldScanInfo.getKeepDeletedCells());
    store.setScanInfo(newScanInfo);
    // Do not compact an empty store file
    List<HStoreFile> candidates = sfCreate(0);
    for (HStoreFile file : candidates) {
      if (file instanceof MockHStoreFile) {
        MockHStoreFile mockFile = (MockHStoreFile) file;
        mockFile.setTimeRangeTracker(TimeRangeTracker.create(TimeRangeTracker.Type.SYNC, -1, -1));
        mockFile.setEntries(0);
      }
    }
    // Test default compactions
    CompactionRequestImpl result =
      ((RatioBasedCompactionPolicy) store.storeEngine.getCompactionPolicy())
        .selectCompaction(candidates, new ArrayList<>(), false, false, false);
    Assert.assertTrue(result.getFiles().isEmpty());
    store.setScanInfo(oldScanInfo);
  }
}