/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.regionserver;

import java.io.IOException;
import java.net.UnknownHostException;
import java.util.Arrays;
import java.util.Map;
import java.util.Optional;
import java.util.OptionalLong;
import java.util.TreeMap;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.CellBuilderType;
import org.apache.hadoop.hbase.ExtendedCell;
import org.apache.hadoop.hbase.ExtendedCellBuilderFactory;
import org.apache.hadoop.hbase.HBaseTestingUtil;
import org.apache.hadoop.hbase.HDFSBlocksDistribution;
import org.apache.hadoop.hbase.io.hfile.CacheConfig;
import org.apache.hadoop.hbase.regionserver.storefiletracker.StoreFileTracker;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.DNS;
import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
import org.apache.yetus.audience.InterfaceAudience;

/** A mock used so our tests don't have to deal with actual StoreFiles. */
@InterfaceAudience.Private
public class MockHStoreFile extends HStoreFile {
  long length = 0;
  boolean isRef = false;
  long ageInDisk;
  long sequenceid;
  private Map<byte[], byte[]> metadata = new TreeMap<>(Bytes.BYTES_COMPARATOR);
  byte[] splitPoint = null;
  TimeRangeTracker timeRangeTracker;
  long entryCount;
  boolean isMajor;
  HDFSBlocksDistribution hdfsBlocksDistribution;
  long modificationTime;
  boolean compactedAway;

  MockHStoreFile(HBaseTestingUtil testUtil, Path testPath, long length, long ageInDisk,
    boolean isRef, long sequenceid, StoreFileInfo storeFileInfo) throws IOException {
    super(storeFileInfo, BloomType.NONE, new CacheConfig(testUtil.getConfiguration()));
    setMockHStoreFileVals(length, isRef, ageInDisk, sequenceid, isMajor, testUtil);
  }

  private void setMockHStoreFileVals(long length, boolean isRef, long ageInDisk, long sequenceid,
    boolean isMajor, HBaseTestingUtil testUtil) throws UnknownHostException {
    this.length = length;
    this.isRef = isRef;
    this.ageInDisk = ageInDisk;
    this.sequenceid = sequenceid;
    this.isMajor = isMajor;
    hdfsBlocksDistribution = new HDFSBlocksDistribution();
    hdfsBlocksDistribution.addHostsAndBlockWeight(
      new String[] { DNS.getHostname(testUtil.getConfiguration(), DNS.ServerType.REGIONSERVER) },
      1);
    modificationTime = EnvironmentEdgeManager.currentTime();
  }

  MockHStoreFile(HBaseTestingUtil testUtil, Path testPath, long length, long ageInDisk,
    boolean isRef, long sequenceid, StoreFileTracker tracker) throws IOException {
    super(testUtil.getTestFileSystem(), testPath, testUtil.getConfiguration(),
      new CacheConfig(testUtil.getConfiguration()), BloomType.NONE, true, tracker);
    setMockHStoreFileVals(length, isRef, ageInDisk, sequenceid, isMajor, testUtil);
  }

  void setLength(long newLen) {
    this.length = newLen;
  }

  @Override
  public long getMaxSequenceId() {
    return sequenceid;
  }

  @Override
  public boolean isMajorCompactionResult() {
    return isMajor;
  }

  public void setIsMajor(boolean isMajor) {
    this.isMajor = isMajor;
  }

  @Override
  public boolean isReference() {
    return this.isRef;
  }

  @Override
  public boolean isBulkLoadResult() {
    return false;
  }

  @Override
  public byte[] getMetadataValue(byte[] key) {
    return this.metadata.get(key);
  }

  public void setMetadataValue(byte[] key, byte[] value) {
    this.metadata.put(key, value);
  }

  void setTimeRangeTracker(TimeRangeTracker timeRangeTracker) {
    this.timeRangeTracker = timeRangeTracker;
  }

  void setEntries(long entryCount) {
    this.entryCount = entryCount;
  }

  @Override
  public OptionalLong getMinimumTimestamp() {
    return timeRangeTracker == null
      ? OptionalLong.empty()
      : OptionalLong.of(timeRangeTracker.getMin());
  }

  @Override
  public OptionalLong getMaximumTimestamp() {
    return timeRangeTracker == null
      ? OptionalLong.empty()
      : OptionalLong.of(timeRangeTracker.getMax());
  }

  @Override
  public void markCompactedAway() {
    this.compactedAway = true;
  }

  @Override
  public boolean isCompactedAway() {
    return compactedAway;
  }

  @Override
  public long getModificationTimestamp() {
    return modificationTime;
  }

  @Override
  public HDFSBlocksDistribution getHDFSBlockDistribution() {
    return hdfsBlocksDistribution;
  }

  @Override
  public void initReader() throws IOException {
    // no-op; the mock builds its reader on demand in getReader()
  }

  @Override
  public StoreFileScanner getPreadScanner(boolean cacheBlocks, long readPt, long scannerOrder,
    boolean canOptimizeForNonNullColumn) {
    return getReader().getStoreFileScanner(cacheBlocks, true, false, readPt, scannerOrder,
      canOptimizeForNonNullColumn);
  }

  @Override
  public StoreFileScanner getStreamScanner(boolean canUseDropBehind, boolean cacheBlocks,
    boolean isCompaction, long readPt, long scannerOrder, boolean canOptimizeForNonNullColumn)
    throws IOException {
    return getReader().getStoreFileScanner(cacheBlocks, false, isCompaction, readPt, scannerOrder,
      canOptimizeForNonNullColumn);
  }

  @Override
  public StoreFileReader getReader() {
    final long len = this.length;
    final TimeRangeTracker timeRangeTracker = this.timeRangeTracker;
    final long entries = this.entryCount;
    return new StoreFileReader() {
      @Override
      public long length() {
        return len;
      }

      @Override
      public long getMaxTimestamp() {
        // Fall back to Long.MAX_VALUE only when no tracker has been set on the mock,
        // matching the null checks in getMinimumTimestamp/getMaximumTimestamp above.
        return timeRangeTracker == null ? Long.MAX_VALUE : timeRangeTracker.getMax();
      }

      @Override
      public long getEntries() {
        return entries;
      }

      @Override
      public void close(boolean evictOnClose) throws IOException {
        // no-op
      }

      @Override
      public Optional<ExtendedCell> getLastKey() {
        if (splitPoint != null) {
          // Append a byte so the last key sorts strictly after the split point.
          return Optional
            .of(ExtendedCellBuilderFactory.create(CellBuilderType.DEEP_COPY).setType(Cell.Type.Put)
              .setRow(Arrays.copyOf(splitPoint, splitPoint.length + 1)).build());
        } else {
          return Optional.empty();
        }
      }

      @Override
      public Optional<ExtendedCell> midKey() throws IOException {
        if (splitPoint != null) {
          return Optional.of(ExtendedCellBuilderFactory.create(CellBuilderType.DEEP_COPY)
            .setType(Cell.Type.Put).setRow(splitPoint).build());
        } else {
          return Optional.empty();
        }
      }

      @Override
      public Optional<ExtendedCell> getFirstKey() {
        if (splitPoint != null) {
          // Drop the trailing byte so the first key sorts strictly before the split point.
          return Optional.of(ExtendedCellBuilderFactory.create(CellBuilderType.DEEP_COPY)
            .setType(Cell.Type.Put).setRow(splitPoint, 0, splitPoint.length - 1).build());
        } else {
          return Optional.empty();
        }
      }
    };
  }

  @Override
  public OptionalLong getBulkLoadTimestamp() {
    // we always return false for isBulkLoadResult so we do not have a bulk load timestamp
    return OptionalLong.empty();
  }
}
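
// A minimal usage sketch, kept as a comment so this file still compiles. It is a
// hypothetical illustration, not taken from this repo's tests: it assumes a test that
// already has an HBaseTestingUtil `UTIL` and a StoreFileTracker `tracker` from its own
// setup, and shows how the mock stands in for a real store file when exercising code
// such as compaction selection.
//
//   MockHStoreFile sf =
//     new MockHStoreFile(UTIL, new Path("/mock/sf"), 1024L, 0L, false, 42L, tracker);
//   sf.setEntries(100L);
//   sf.setTimeRangeTracker(TimeRangeTracker.create(TimeRangeTracker.Type.SYNC));
//   sf.setIsMajor(true);
//   assertEquals(42L, sf.getMaxSequenceId()); // JUnit's org.junit.Assert.assertEquals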