/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.util;

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.CompoundConfiguration;
import org.apache.hadoop.hbase.DoNotRetryIOException;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.conf.ConfigKey;
import org.apache.hadoop.hbase.fs.ErasureCodingUtils;
import org.apache.hadoop.hbase.regionserver.DefaultStoreEngine;
import org.apache.hadoop.hbase.regionserver.HStore;
import org.apache.hadoop.hbase.regionserver.RegionCoprocessorHost;
import org.apache.hadoop.hbase.regionserver.RegionSplitPolicy;
import org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy;
import org.apache.hadoop.hbase.regionserver.compactions.FIFOCompactionPolicy;
import org.apache.yetus.audience.InterfaceAudience;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos;

/**
 * Only used for master to sanity check {@link org.apache.hadoop.hbase.client.TableDescriptor}.
 */
@InterfaceAudience.Private
public final class TableDescriptorChecker {
  // Logger must be a constant; was previously non-final by mistake.
  private static final Logger LOG = LoggerFactory.getLogger(TableDescriptorChecker.class);

  public static final String TABLE_SANITY_CHECKS = "hbase.table.sanity.checks";
  public static final boolean DEFAULT_TABLE_SANITY_CHECKS = true;

  // should we check the compression codec type at master side, default true, HBASE-6370
  public static final String MASTER_CHECK_COMPRESSION = "hbase.master.check.compression";
  public static final boolean DEFAULT_MASTER_CHECK_COMPRESSION = true;

  // should we check encryption settings at master side, default true
  public static final String MASTER_CHECK_ENCRYPTION = "hbase.master.check.encryption";
  public static final boolean DEFAULT_MASTER_CHECK_ENCRYPTION = true;

  // Utility class; not instantiable.
  private TableDescriptorChecker() {
  }

  /**
   * Returns whether sanity-check failures should be fatal, per
   * {@value #TABLE_SANITY_CHECKS} (which may come from site config or be
   * overridden in the table descriptor via the CompoundConfiguration).
   */
  private static boolean shouldSanityCheck(final Configuration conf) {
    return conf.getBoolean(TABLE_SANITY_CHECKS, DEFAULT_TABLE_SANITY_CHECKS);
  }

  /**
   * Checks whether the table conforms to some sane limits, and configured values (compression, etc)
   * work. Throws an exception if something is wrong.
   */
  public static void sanityCheck(final Configuration c, final TableDescriptor td)
    throws IOException {
    // Table-descriptor values override site configuration for the checks below.
    CompoundConfiguration conf = new CompoundConfiguration().add(c).addBytesMap(td.getValues());

    // Setting logs to warning instead of throwing exception if sanityChecks are disabled
    boolean logWarn = !shouldSanityCheck(conf);

    // Check value types
    warnOrThrowExceptionForFailure(logWarn, () -> ConfigKey.validate(conf));
    warnOrThrowExceptionForFailure(logWarn, () -> {
      for (ColumnFamilyDescriptor cfd : td.getColumnFamilies()) {
        ConfigKey.validate(new CompoundConfiguration().addBytesMap(cfd.getValues()));
      }
    });

    // check max file size
    long maxFileSizeLowerLimit = 2 * 1024 * 1024L; // 2M is the default lower limit
    // if not set MAX_FILESIZE in TableDescriptor, and not set HREGION_MAX_FILESIZE in
    // hbase-site.xml, use maxFileSizeLowerLimit instead to skip this check
    long maxFileSize = td.getValue(TableDescriptorBuilder.MAX_FILESIZE) == null
      ? conf.getLong(HConstants.HREGION_MAX_FILESIZE, maxFileSizeLowerLimit)
      : Long.parseLong(td.getValue(TableDescriptorBuilder.MAX_FILESIZE));
    if (maxFileSize < conf.getLong("hbase.hregion.max.filesize.limit", maxFileSizeLowerLimit)) {
      String message = "MAX_FILESIZE for table descriptor or " + "\"hbase.hregion.max.filesize\" ("
        + maxFileSize + ") is too small, which might cause over splitting into unmanageable "
        + "number of regions.";
      warnOrThrowExceptionForFailure(logWarn, message, null);
    }

    // check flush size
    long flushSizeLowerLimit = 1024 * 1024L; // 1M is the default lower limit
    // if not set MEMSTORE_FLUSHSIZE in TableDescriptor, and not set HREGION_MEMSTORE_FLUSH_SIZE in
    // hbase-site.xml, use flushSizeLowerLimit instead to skip this check
    long flushSize = td.getValue(TableDescriptorBuilder.MEMSTORE_FLUSHSIZE) == null
      ? conf.getLong(HConstants.HREGION_MEMSTORE_FLUSH_SIZE, flushSizeLowerLimit)
      : Long.parseLong(td.getValue(TableDescriptorBuilder.MEMSTORE_FLUSHSIZE));
    if (flushSize < conf.getLong("hbase.hregion.memstore.flush.size.limit", flushSizeLowerLimit)) {
      String message =
        "MEMSTORE_FLUSHSIZE for table descriptor or " + "\"hbase.hregion.memstore.flush.size\" ("
          + flushSize + ") is too small, which might cause" + " very frequent flushing.";
      warnOrThrowExceptionForFailure(logWarn, message, null);
    }

    // check that coprocessors and other specified plugin classes can be loaded
    checkClassLoading(conf, td);

    if (conf.getBoolean(MASTER_CHECK_COMPRESSION, DEFAULT_MASTER_CHECK_COMPRESSION)) {
      // check compression can be loaded
      checkCompression(conf, td);
    }

    if (conf.getBoolean(MASTER_CHECK_ENCRYPTION, DEFAULT_MASTER_CHECK_ENCRYPTION)) {
      // check encryption can be loaded
      checkEncryption(conf, td);
    }

    // Verify compaction policy
    checkCompactionPolicy(conf, td);
    // check that we have at least 1 CF
    if (td.getColumnFamilyCount() == 0) {
      String message = "Table should have at least one column family.";
      warnOrThrowExceptionForFailure(logWarn, message, null);
    }

    // check that we have minimum 1 region replicas
    int regionReplicas = td.getRegionReplication();
    if (regionReplicas < 1) {
      String message = "Table region replication should be at least one.";
      warnOrThrowExceptionForFailure(logWarn, message, null);
    }

    // Meta table shouldn't be set as read only, otherwise it will impact region assignments
    // NOTE: logWarn is deliberately forced to false here so this is always fatal,
    // even when sanity checks are otherwise disabled.
    if (td.isReadOnly() && TableName.isMetaTableName(td.getTableName())) {
      warnOrThrowExceptionForFailure(false, "Meta table can't be set as read only.", null);
    }

    // check replication scope
    checkReplicationScope(conf, td);

    // check bloom filter type
    checkBloomFilterType(conf, td);

    if (td.getErasureCodingPolicy() != null) {
      warnOrThrowExceptionForFailure(logWarn,
        () -> ErasureCodingUtils.verifySupport(conf, td.getErasureCodingPolicy()));
    }

    for (ColumnFamilyDescriptor hcd : td.getColumnFamilies()) {
      if (hcd.getTimeToLive() <= 0) {
        String message = "TTL for column family " + hcd.getNameAsString() + " must be positive.";
        warnOrThrowExceptionForFailure(logWarn, message, null);
      }

      // check blockSize
      if (hcd.getBlocksize() < 1024 || hcd.getBlocksize() > 16 * 1024 * 1024) {
        String message = "Block size for column family " + hcd.getNameAsString()
          + " must be between 1K and 16MB.";
        warnOrThrowExceptionForFailure(logWarn, message, null);
      }

      // check versions
      if (hcd.getMinVersions() < 0) {
        String message =
          "Min versions for column family " + hcd.getNameAsString() + " must be positive.";
        warnOrThrowExceptionForFailure(logWarn, message, null);
      }
      // max versions already being checked

      // HBASE-13776 Setting illegal versions for ColumnFamilyDescriptor
      // does not throw IllegalArgumentException
      // check minVersions <= maxVerions
      if (hcd.getMinVersions() > hcd.getMaxVersions()) {
        String message = "Min versions for column family " + hcd.getNameAsString()
          + " must be less than the Max versions.";
        warnOrThrowExceptionForFailure(logWarn, message, null);
      }

      // check data replication factor, it can be 0(default value) when user has not explicitly
      // set the value, in this case we use default replication factor set in the file system.
      if (hcd.getDFSReplication() < 0) {
        String message = "HFile Replication for column family " + hcd.getNameAsString()
          + " must be greater than zero.";
        warnOrThrowExceptionForFailure(logWarn, message, null);
      }

      // check in-memory compaction
      warnOrThrowExceptionForFailure(logWarn, hcd::getInMemoryCompaction);
    }
  }

  /**
   * Verifies every column family's replication scope maps to a known
   * {@link WALProtos.ScopeType}; protobuf {@code valueOf(int)} returns null for
   * unknown numeric values.
   */
  private static void checkReplicationScope(final Configuration conf, final TableDescriptor td)
    throws IOException {
    warnOrThrowExceptionForFailure(conf, () -> {
      for (ColumnFamilyDescriptor cfd : td.getColumnFamilies()) {
        // check replication scope
        WALProtos.ScopeType scop = WALProtos.ScopeType.valueOf(cfd.getScope());
        if (scop == null) {
          String message = "Replication scope for column family " + cfd.getNameAsString() + " is "
            + cfd.getScope() + " which is invalid.";

          throw new DoNotRetryIOException(message);
        }
      }
    });
  }

  /**
   * Validates per-family compaction policy configuration. Only FIFO compaction
   * has extra requirements; violations are always fatal (logWarn is forced to
   * false) because a misconfigured FIFO family cannot function.
   */
  private static void checkCompactionPolicy(final Configuration conf, final TableDescriptor td)
    throws IOException {
    warnOrThrowExceptionForFailure(false, () -> {
      // FIFO compaction has some requirements
      // Actually FCP ignores periodic major compactions
      String className = td.getValue(DefaultStoreEngine.DEFAULT_COMPACTION_POLICY_CLASS_KEY);
      if (className == null) {
        className = conf.get(DefaultStoreEngine.DEFAULT_COMPACTION_POLICY_CLASS_KEY,
          ExploringCompactionPolicy.class.getName());
      }

      int blockingFileCount = HStore.DEFAULT_BLOCKING_STOREFILE_COUNT;
      String sv = td.getValue(HStore.BLOCKING_STOREFILES_KEY);
      if (sv != null) {
        blockingFileCount = Integer.parseInt(sv);
      } else {
        blockingFileCount = conf.getInt(HStore.BLOCKING_STOREFILES_KEY, blockingFileCount);
      }

      for (ColumnFamilyDescriptor hcd : td.getColumnFamilies()) {
        String compactionPolicy =
          hcd.getConfigurationValue(DefaultStoreEngine.DEFAULT_COMPACTION_POLICY_CLASS_KEY);
        if (compactionPolicy == null) {
          compactionPolicy = className;
        }
        if (!compactionPolicy.equals(FIFOCompactionPolicy.class.getName())) {
          continue;
        }
        // FIFOCompaction
        String message = null;

        // 1. Check TTL
        if (hcd.getTimeToLive() == ColumnFamilyDescriptorBuilder.DEFAULT_TTL) {
          message = "Default TTL is not supported for FIFO compaction";
          throw new IOException(message);
        }

        // 2. Check min versions
        if (hcd.getMinVersions() > 0) {
          message = "MIN_VERSION > 0 is not supported for FIFO compaction";
          throw new IOException(message);
        }

        // 3. blocking file count
        // NOTE(review): a per-family override of blockingFileCount intentionally(?)
        // carries over to subsequent families in this loop — confirm this matches
        // the intended precedence before changing it.
        sv = hcd.getConfigurationValue(HStore.BLOCKING_STOREFILES_KEY);
        if (sv != null) {
          blockingFileCount = Integer.parseInt(sv);
        }
        if (blockingFileCount < 1000) {
          message =
            "Blocking file count '" + HStore.BLOCKING_STOREFILES_KEY + "' " + blockingFileCount
              + " is below recommended minimum of 1000 for column family " + hcd.getNameAsString();
          throw new IOException(message);
        }
      }
    });
  }

  /**
   * Verifies each family's bloom filter type and its family-level configuration
   * form a valid parameter combination.
   */
  private static void checkBloomFilterType(final Configuration conf, final TableDescriptor td)
    throws IOException {
    warnOrThrowExceptionForFailure(conf, () -> {
      for (ColumnFamilyDescriptor cfd : td.getColumnFamilies()) {
        Configuration cfdConf = new CompoundConfiguration().addStringMap(cfd.getConfiguration());
        try {
          BloomFilterUtil.getBloomFilterParam(cfd.getBloomFilterType(), cfdConf);
        } catch (IllegalArgumentException e) {
          throw new DoNotRetryIOException("Failed to get bloom filter param", e);
        }
      }
    });
  }

  /**
   * Verifies the compression codecs configured for every column family (normal,
   * compaction, major- and minor-compaction) can actually be loaded and used.
   */
  public static void checkCompression(final Configuration conf, final TableDescriptor td)
    throws IOException {
    warnOrThrowExceptionForFailure(conf, () -> {
      for (ColumnFamilyDescriptor cfd : td.getColumnFamilies()) {
        CompressionTest.testCompression(cfd.getCompressionType());
        CompressionTest.testCompression(cfd.getCompactionCompressionType());
        CompressionTest.testCompression(cfd.getMajorCompactionCompressionType());
        CompressionTest.testCompression(cfd.getMinorCompactionCompressionType());
      }
    });
  }

  /**
   * Verifies the encryption cipher/key configured for every column family can
   * be loaded and unwrapped.
   */
  public static void checkEncryption(final Configuration conf, final TableDescriptor td)
    throws IOException {
    warnOrThrowExceptionForFailure(conf, () -> {
      for (ColumnFamilyDescriptor cfd : td.getColumnFamilies()) {
        EncryptionTest.testEncryption(conf, cfd.getEncryptionType(), cfd.getEncryptionKey());
      }
    });
  }

  /**
   * Verifies the split policy class and all table coprocessor classes resolve
   * and load on the master.
   */
  public static void checkClassLoading(final Configuration conf, final TableDescriptor td)
    throws IOException {
    warnOrThrowExceptionForFailure(conf, () -> {
      RegionSplitPolicy.getSplitPolicyClass(td, conf);
      RegionCoprocessorHost.testTableCoprocessorAttrs(conf, td);
    });
  }

  // HBASE-13350 - Helper method to log warning on sanity check failures if checks disabled.
  private static void warnOrThrowExceptionForFailure(boolean logWarn, String message,
    Exception cause) throws IOException {
    if (!logWarn) {
      throw new DoNotRetryIOException(message + " Set " + TABLE_SANITY_CHECKS
        + " to false at conf or table descriptor if you want to bypass sanity checks", cause);
    }
    LOG.warn(message);
  }

  // Variant that derives the warn-vs-throw decision from configuration.
  private static void warnOrThrowExceptionForFailure(Configuration conf, ThrowingRunnable runnable)
    throws IOException {
    boolean logWarn = !shouldSanityCheck(conf);
    warnOrThrowExceptionForFailure(logWarn, runnable);
  }

  // Runs the check and routes any failure through the warn-or-throw policy,
  // preserving the original exception as the cause.
  private static void warnOrThrowExceptionForFailure(boolean logWarn, ThrowingRunnable runnable)
    throws IOException {
    try {
      runnable.run();
    } catch (Exception e) {
      warnOrThrowExceptionForFailure(logWarn, e.getMessage(), e);
    }
  }

  /** A check that may throw; used so lambdas can surface checked exceptions. */
  @FunctionalInterface
  interface ThrowingRunnable {
    void run() throws Exception;
  }
}