/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.io;

import java.io.BufferedInputStream;
import java.io.DataInput;
import java.io.DataInputStream;
import java.io.IOException;
import java.io.InputStream;
import java.util.Arrays;
import org.apache.commons.io.IOUtils;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.KeyValueUtil;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.yetus.audience.InterfaceAudience;

import org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations;

import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
import org.apache.hadoop.hbase.shaded.protobuf.generated.FSProtos;

/**
 * A reference to the top or bottom half of a store file where 'bottom' is the first half of the
 * file containing the keys that sort lowest and 'top' is the second half of the file with keys that
 * sort greater than those of the bottom half. The file referenced lives under a different region.
 * References are made at region split time.
 * <p>
 * References work with a special half store file type. References know how to write out the
 * reference format in the file system and are what is juggled when references are mixed in with
 * direct store files. The half store file type is used reading the referred to file.
 * <p>
 * References to store files located over in some other region look like this in the file system
 * <code>1278437856009925445.3323223323</code>: i.e. an id followed by hash of the referenced
 * region. Note, a region is itself not splittable if it has instances of store file references.
 * References are cleaned up by compactions.
 */
@InterfaceAudience.Private
public class Reference {
  // First-on-row key derived from the split row; may be null only for the
  // deprecated no-arg (pb deserialization) constructor.
  private byte[] splitkey;
  private Range region;

  /**
   * For split HStoreFiles, it specifies if the file covers the lower half or the upper half of the
   * key range
   */
  enum Range {
    /** HStoreFile contains upper half of key range */
    top,
    /** HStoreFile contains lower half of key range */
    bottom
  }

  /** Returns A {@link Reference} that points at top half of an hfile */
  public static Reference createTopReference(final byte[] splitRow) {
    return new Reference(splitRow, Range.top);
  }

  /** Returns A {@link Reference} that points at the bottom half of an hfile */
  public static Reference createBottomReference(final byte[] splitRow) {
    return new Reference(splitRow, Range.bottom);
  }

  /**
   * Constructor
   * @param splitRow This is row we are splitting around.
   * @param fr       Which half of the key range this reference covers.
   */
  Reference(final byte[] splitRow, final Range fr) {
    // Store the first-possible key on the split row so comparisons against
    // cell keys in the referenced file work directly.
    this.splitkey = splitRow == null ? null : KeyValueUtil.createFirstOnRow(splitRow).getKey();
    this.region = fr;
  }

  /**
   * Used by serializations.
   * @deprecated need by pb serialization
   */
  @Deprecated
  // Make this private when it comes time to let go of this constructor.
  // Needed by pb serialization.
  public Reference() {
    this(null, Range.bottom);
  }

  /** Returns which half of the split key range ({@link Range#top} or {@link Range#bottom}) this
   * reference covers. */
  public Range getFileRegion() {
    return this.region;
  }

  /** Returns the split key (a first-on-row key), or null if created via the deprecated no-arg
   * constructor and not yet deserialized. */
  public byte[] getSplitKey() {
    return splitkey;
  }

  /**
   * @see java.lang.Object#toString()
   */
  @Override
  public String toString() {
    return "" + this.region;
  }

  public static boolean isTopFileRegion(final Range r) {
    // Enum identity comparison; also tolerates a null argument (returns false
    // rather than throwing NPE).
    return r == Range.top;
  }

  /**
   * @deprecated Writables are going away. Use the pb serialization methods instead. Remove in a
   *             release after 0.96 goes out. This is here only to migrate old Reference files
   *             written with Writables before 0.96.
   */
  @Deprecated
  public void readFields(DataInput in) throws IOException {
    boolean tmp = in.readBoolean();
    // If true, set region to top.
    this.region = tmp ? Range.top : Range.bottom;
    this.splitkey = Bytes.readByteArray(in);
  }

  /**
   * Write this reference to the file system as a pb-serialized file at <code>p</code>.
   * @return the path written to (same as <code>p</code>)
   * @throws IOException if the file already exists or the write fails
   */
  public Path write(final FileSystem fs, final Path p) throws IOException {
    // try-with-resources guarantees the stream is closed even if write() throws.
    try (FSDataOutputStream out = fs.create(p, false)) {
      out.write(toByteArray());
    }
    return p;
  }

  /**
   * Read a Reference from FileSystem.
   * @return New Reference made from passed <code>p</code>
   */
  public static Reference read(final FileSystem fs, final Path p) throws IOException {
    InputStream in = fs.open(p);
    try {
      // I need to be able to move back in the stream if this is not a pb serialization so I can
      // do the Writable decoding instead.
      in = in.markSupported() ? in : new BufferedInputStream(in);
      int pblen = ProtobufUtil.lengthOfPBMagic();
      in.mark(pblen);
      byte[] pbuf = new byte[pblen];
      IOUtils.readFully(in, pbuf, 0, pblen);
      if (ProtobufUtil.isPBMagicPrefix(pbuf)) {
        return convert(FSProtos.Reference.parseFrom(in));
      }
      // Else presume Writables. Need to reset the stream since it didn't start w/ pb.
      // We won't bother rewriting the Reference as a pb since Reference is transitory.
      in.reset();
      Reference r = new Reference();
      DataInputStream dis = new DataInputStream(in);
      // Set in = dis so it gets the close below in the finally on our way out.
      in = dis;
      r.readFields(dis);
      return r;
    } finally {
      in.close();
    }
  }

  /** Returns this reference converted to its protobuf representation. */
  public FSProtos.Reference convert() {
    FSProtos.Reference.Builder builder = FSProtos.Reference.newBuilder();
    builder.setRange(isTopFileRegion(getFileRegion())
      ? FSProtos.Reference.Range.TOP
      : FSProtos.Reference.Range.BOTTOM);
    builder.setSplitkey(UnsafeByteOperations.unsafeWrap(getSplitKey()));
    return builder.build();
  }

  /** Returns a {@link Reference} built from the passed protobuf representation. */
  public static Reference convert(final FSProtos.Reference r) {
    Reference result = new Reference();
    result.splitkey = r.getSplitkey().toByteArray();
    result.region = r.getRange() == FSProtos.Reference.Range.TOP ? Range.top : Range.bottom;
    return result;
  }

  /**
   * Use this when writing to a stream and you want to use the pb mergeDelimitedFrom (w/o the
   * delimiter, pb reads to EOF which may not be what you want).
   * @return This instance serialized as a delimited protobuf w/ a magic pb prefix.
   */
  byte[] toByteArray() throws IOException {
    return ProtobufUtil.prependPBMagic(convert().toByteArray());
  }

  @Override
  public int hashCode() {
    return Arrays.hashCode(splitkey) + region.hashCode();
  }

  @Override
  public boolean equals(Object o) {
    if (this == o) {
      return true;
    }
    // instanceof is false for null, so no separate null check is needed.
    if (!(o instanceof Reference)) {
      return false;
    }
    Reference r = (Reference) o;
    // Arrays.equals handles the null/null, null/non-null and element-wise
    // cases exactly as the previous hand-rolled checks did.
    return Arrays.equals(splitkey, r.splitkey) && region.equals(r.region);
  }
}