/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.util;

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.CompoundConfiguration;
import org.apache.hadoop.hbase.DoNotRetryIOException;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.conf.ConfigKey;
import org.apache.hadoop.hbase.fs.ErasureCodingUtils;
import org.apache.hadoop.hbase.regionserver.DefaultStoreEngine;
import org.apache.hadoop.hbase.regionserver.HStore;
import org.apache.hadoop.hbase.regionserver.RegionCoprocessorHost;
import org.apache.hadoop.hbase.regionserver.RegionSplitPolicy;
import org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy;
import org.apache.hadoop.hbase.regionserver.compactions.FIFOCompactionPolicy;
import org.apache.yetus.audience.InterfaceAudience;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos;

/**
 * Only used by the master to sanity check {@link org.apache.hadoop.hbase.client.TableDescriptor}.
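 * <p>
 * A minimal sketch of typical master-side usage (the table and family names below are purely
 * illustrative, and {@code conf} is assumed to be the master's configuration):
 *
 * <pre>
 * TableDescriptor td = TableDescriptorBuilder.newBuilder(TableName.valueOf("example"))
 *   .setColumnFamily(ColumnFamilyDescriptorBuilder.of("cf")).build();
 * // Throws DoNotRetryIOException if a check fails while sanity checks are enabled;
 * // otherwise failures are only logged as warnings.
 * TableDescriptorChecker.sanityCheck(conf, td);
 * </pre>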
 */
@InterfaceAudience.Private
public final class TableDescriptorChecker {
  private static final Logger LOG = LoggerFactory.getLogger(TableDescriptorChecker.class);

  public static final String TABLE_SANITY_CHECKS = "hbase.table.sanity.checks";
  public static final boolean DEFAULT_TABLE_SANITY_CHECKS = true;

  // Should we check the compression codec type on the master side? Default true. See HBASE-6370.
  public static final String MASTER_CHECK_COMPRESSION = "hbase.master.check.compression";
  public static final boolean DEFAULT_MASTER_CHECK_COMPRESSION = true;

  // Should we check encryption settings on the master side? Default true.
  public static final String MASTER_CHECK_ENCRYPTION = "hbase.master.check.encryption";
  public static final boolean DEFAULT_MASTER_CHECK_ENCRYPTION = true;

  private TableDescriptorChecker() {
  }

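  /**
   * Returns true if table sanity checks are enabled in the given configuration. Callers typically
   * pass a {@link CompoundConfiguration} that layers the table descriptor's values over the site
   * configuration, so the setting can be overridden per table.
   */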
  private static boolean shouldSanityCheck(final Configuration conf) {
    return conf.getBoolean(TABLE_SANITY_CHECKS, DEFAULT_TABLE_SANITY_CHECKS);
  }

  /**
   * Checks whether the table conforms to some sane limits and whether the configured values
   * (compression, encryption, etc.) can actually be loaded. Throws an exception if something is
   * wrong.
   */
  public static void sanityCheck(final Configuration c, final TableDescriptor td)
    throws IOException {
    CompoundConfiguration conf = new CompoundConfiguration().add(c).addBytesMap(td.getValues());

    // Log a warning instead of throwing an exception when sanity checks are disabled
    boolean logWarn = !shouldSanityCheck(conf);

    // Check value types
    warnOrThrowExceptionForFailure(logWarn, () -> ConfigKey.validate(conf));
    warnOrThrowExceptionForFailure(logWarn, () -> {
      for (ColumnFamilyDescriptor cfd : td.getColumnFamilies()) {
        ConfigKey.validate(new CompoundConfiguration().addStringMap(cfd.getConfiguration())
          .addBytesMap(cfd.getValues()));
      }
    });

    // check max file size
    long maxFileSizeLowerLimit = 2 * 1024 * 1024L; // 2M is the default lower limit
    // If MAX_FILESIZE is not set in the TableDescriptor and HREGION_MAX_FILESIZE is not set in
    // hbase-site.xml, fall back to maxFileSizeLowerLimit so this check is effectively skipped.
    long maxFileSize = td.getValue(TableDescriptorBuilder.MAX_FILESIZE) == null
      ? conf.getLong(HConstants.HREGION_MAX_FILESIZE, maxFileSizeLowerLimit)
      : Long.parseLong(td.getValue(TableDescriptorBuilder.MAX_FILESIZE));
    if (maxFileSize < conf.getLong("hbase.hregion.max.filesize.limit", maxFileSizeLowerLimit)) {
      String message = "MAX_FILESIZE for table descriptor or \"hbase.hregion.max.filesize\" ("
        + maxFileSize + ") is too small, which might cause over-splitting into an unmanageable "
        + "number of regions.";
      warnOrThrowExceptionForFailure(logWarn, message, null);
    }

    // check flush size
    long flushSizeLowerLimit = 1024 * 1024L; // 1M is the default lower limit
    // If MEMSTORE_FLUSHSIZE is not set in the TableDescriptor and HREGION_MEMSTORE_FLUSH_SIZE is
    // not set in hbase-site.xml, fall back to flushSizeLowerLimit so this check is effectively
    // skipped.
    long flushSize = td.getValue(TableDescriptorBuilder.MEMSTORE_FLUSHSIZE) == null
      ? conf.getLong(HConstants.HREGION_MEMSTORE_FLUSH_SIZE, flushSizeLowerLimit)
      : Long.parseLong(td.getValue(TableDescriptorBuilder.MEMSTORE_FLUSHSIZE));
    if (flushSize < conf.getLong("hbase.hregion.memstore.flush.size.limit", flushSizeLowerLimit)) {
      String message = "MEMSTORE_FLUSHSIZE for table descriptor or "
        + "\"hbase.hregion.memstore.flush.size\" (" + flushSize
        + ") is too small, which might cause very frequent flushing.";
      warnOrThrowExceptionForFailure(logWarn, message, null);
    }

    // check that coprocessors and other specified plugin classes can be loaded
    checkClassLoading(conf, td);

    if (conf.getBoolean(MASTER_CHECK_COMPRESSION, DEFAULT_MASTER_CHECK_COMPRESSION)) {
      // check that the compression codecs can be loaded
      checkCompression(conf, td);
    }

    if (conf.getBoolean(MASTER_CHECK_ENCRYPTION, DEFAULT_MASTER_CHECK_ENCRYPTION)) {
      // check that the encryption settings can be loaded
      checkEncryption(conf, td);
    }

    // Verify compaction policy
    checkCompactionPolicy(conf, td);
    // check that we have at least 1 CF
    if (td.getColumnFamilyCount() == 0) {
      String message = "Table should have at least one column family.";
      warnOrThrowExceptionForFailure(logWarn, message, null);
    }

    // check that we have at least 1 region replica
    int regionReplicas = td.getRegionReplication();
    if (regionReplicas < 1) {
      String message = "Table region replication should be at least one.";
      warnOrThrowExceptionForFailure(logWarn, message, null);
    }

    // The meta table shouldn't be set as read only, otherwise it will impact region assignments
    if (td.isReadOnly() && TableName.isMetaTableName(td.getTableName())) {
      warnOrThrowExceptionForFailure(false, "Meta table can't be set as read only.", null);
    }

    // check replication scope
    checkReplicationScope(conf, td);

    // check bloom filter type
    checkBloomFilterType(conf, td);

    if (td.getErasureCodingPolicy() != null) {
      warnOrThrowExceptionForFailure(logWarn,
        () -> ErasureCodingUtils.verifySupport(conf, td.getErasureCodingPolicy()));
    }

    for (ColumnFamilyDescriptor hcd : td.getColumnFamilies()) {
      if (hcd.getTimeToLive() <= 0) {
        String message = "TTL for column family " + hcd.getNameAsString() + " must be positive.";
        warnOrThrowExceptionForFailure(logWarn, message, null);
      }

      // check blockSize
      if (hcd.getBlocksize() < 1024 || hcd.getBlocksize() > 16 * 1024 * 1024) {
        String message = "Block size for column family " + hcd.getNameAsString()
          + " must be between 1K and 16MB.";
        warnOrThrowExceptionForFailure(logWarn, message, null);
      }

      // check versions
      if (hcd.getMinVersions() < 0) {
        String message =
          "Min versions for column family " + hcd.getNameAsString() + " must be non-negative.";
        warnOrThrowExceptionForFailure(logWarn, message, null);
      }
      // max versions already being checked

      // HBASE-13776 Setting illegal versions for ColumnFamilyDescriptor
      // does not throw IllegalArgumentException
      // check minVersions <= maxVersions
      if (hcd.getMinVersions() > hcd.getMaxVersions()) {
        String message = "Min versions for column family " + hcd.getNameAsString()
          + " must be less than or equal to the Max versions.";
        warnOrThrowExceptionForFailure(logWarn, message, null);
      }

      // Check the data replication factor. It can be 0 (the default value) when the user has not
      // explicitly set it, in which case the file system's default replication factor is used.
      if (hcd.getDFSReplication() < 0) {
        String message = "HFile Replication for column family " + hcd.getNameAsString()
          + " must not be negative.";
        warnOrThrowExceptionForFailure(logWarn, message, null);
      }

      // check in-memory compaction
      warnOrThrowExceptionForFailure(logWarn, hcd::getInMemoryCompaction);
    }
  }

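  /**
   * Checks that each column family's replication scope maps to a known
   * {@code WALProtos.ScopeType} value.
   */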
  private static void checkReplicationScope(final Configuration conf, final TableDescriptor td)
    throws IOException {
    warnOrThrowExceptionForFailure(conf, () -> {
      for (ColumnFamilyDescriptor cfd : td.getColumnFamilies()) {
        // check replication scope
        WALProtos.ScopeType scope = WALProtos.ScopeType.valueOf(cfd.getScope());
        if (scope == null) {
          String message = "Replication scope for column family " + cfd.getNameAsString() + " is "
            + cfd.getScope() + ", which is invalid.";
          throw new DoNotRetryIOException(message);
        }
      }
    });
  }

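  /**
   * Verifies the configured compaction policy. FIFO compaction has extra requirements: the column
   * family must have a non-default TTL, MIN_VERSIONS must be 0, and the blocking store file count
   * must be at least 1000. As a purely illustrative sketch, a column family that satisfies these
   * checks could be built like this:
   *
   * <pre>
   * ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("cf"))
   *   .setTimeToLive(60 * 60 * 24) // one day, instead of the default "forever"
   *   .setMinVersions(0)
   *   .setConfiguration(HStore.BLOCKING_STOREFILES_KEY, "1000")
   *   .setConfiguration(DefaultStoreEngine.DEFAULT_COMPACTION_POLICY_CLASS_KEY,
   *     FIFOCompactionPolicy.class.getName())
   *   .build();
   * </pre>
   */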
  private static void checkCompactionPolicy(final Configuration conf, final TableDescriptor td)
    throws IOException {
    warnOrThrowExceptionForFailure(false, () -> {
      // FIFO compaction has some requirements.
      // Note that FIFOCompactionPolicy ignores periodic major compactions.
      String className = td.getValue(DefaultStoreEngine.DEFAULT_COMPACTION_POLICY_CLASS_KEY);
      if (className == null) {
        className = conf.get(DefaultStoreEngine.DEFAULT_COMPACTION_POLICY_CLASS_KEY,
          ExploringCompactionPolicy.class.getName());
      }

      int blockingFileCount = HStore.DEFAULT_BLOCKING_STOREFILE_COUNT;
      String sv = td.getValue(HStore.BLOCKING_STOREFILES_KEY);
      if (sv != null) {
        blockingFileCount = Integer.parseInt(sv);
      } else {
        blockingFileCount = conf.getInt(HStore.BLOCKING_STOREFILES_KEY, blockingFileCount);
      }

      for (ColumnFamilyDescriptor hcd : td.getColumnFamilies()) {
        String compactionPolicy =
          hcd.getConfigurationValue(DefaultStoreEngine.DEFAULT_COMPACTION_POLICY_CLASS_KEY);
        if (compactionPolicy == null) {
          compactionPolicy = className;
        }
        if (!compactionPolicy.equals(FIFOCompactionPolicy.class.getName())) {
          continue;
        }
        // FIFOCompaction
        String message = null;

        // 1. Check TTL
        if (hcd.getTimeToLive() == ColumnFamilyDescriptorBuilder.DEFAULT_TTL) {
          message = "Default TTL is not supported for FIFO compaction";
          throw new IOException(message);
        }

        // 2. Check min versions
        if (hcd.getMinVersions() > 0) {
          message = "MIN_VERSIONS > 0 is not supported for FIFO compaction";
          throw new IOException(message);
        }

        // 3. Check blocking file count
        sv = hcd.getConfigurationValue(HStore.BLOCKING_STOREFILES_KEY);
        if (sv != null) {
          blockingFileCount = Integer.parseInt(sv);
        }
        if (blockingFileCount < 1000) {
          message =
            "Blocking file count '" + HStore.BLOCKING_STOREFILES_KEY + "' " + blockingFileCount
              + " is below recommended minimum of 1000 for column family " + hcd.getNameAsString();
          throw new IOException(message);
        }
      }
    });
  }

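  /**
   * Checks that the bloom filter type and any bloom filter parameters configured on each column
   * family can be parsed.
   */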
  private static void checkBloomFilterType(final Configuration conf, final TableDescriptor td)
    throws IOException {
    warnOrThrowExceptionForFailure(conf, () -> {
      for (ColumnFamilyDescriptor cfd : td.getColumnFamilies()) {
        Configuration cfdConf = new CompoundConfiguration().addStringMap(cfd.getConfiguration());
        try {
          BloomFilterUtil.getBloomFilterParam(cfd.getBloomFilterType(), cfdConf);
        } catch (IllegalArgumentException e) {
          throw new DoNotRetryIOException("Failed to get bloom filter param", e);
        }
      }
    });
  }

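  /**
   * Checks that the compression codecs configured for each column family (for flushes as well as
   * minor and major compactions) can be loaded and used.
   */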
  public static void checkCompression(final Configuration conf, final TableDescriptor td)
    throws IOException {
    warnOrThrowExceptionForFailure(conf, () -> {
      for (ColumnFamilyDescriptor cfd : td.getColumnFamilies()) {
        CompressionTest.testCompression(cfd.getCompressionType());
        CompressionTest.testCompression(cfd.getCompactionCompressionType());
        CompressionTest.testCompression(cfd.getMajorCompactionCompressionType());
        CompressionTest.testCompression(cfd.getMinorCompactionCompressionType());
      }
    });
  }

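  /**
   * Checks that the encryption cipher (and key, if one is set) configured for each column family
   * can be loaded.
   */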
  public static void checkEncryption(final Configuration conf, final TableDescriptor td)
    throws IOException {
    warnOrThrowExceptionForFailure(conf, () -> {
      for (ColumnFamilyDescriptor cfd : td.getColumnFamilies()) {
        EncryptionTest.testEncryption(conf, cfd.getEncryptionType(), cfd.getEncryptionKey());
      }
    });
  }

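  /**
   * Checks that the configured region split policy and table coprocessor classes can be loaded.
   */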
  public static void checkClassLoading(final Configuration conf, final TableDescriptor td)
    throws IOException {
    warnOrThrowExceptionForFailure(conf, () -> {
      RegionSplitPolicy.getSplitPolicyClass(td, conf);
      RegionCoprocessorHost.testTableCoprocessorAttrs(conf, td);
    });
  }

  // HBASE-13350 - Helper method that logs a warning on a sanity check failure when checks are
  // disabled, and otherwise throws a DoNotRetryIOException.
  private static void warnOrThrowExceptionForFailure(boolean logWarn, String message,
    Exception cause) throws IOException {
    if (!logWarn) {
      throw new DoNotRetryIOException(message + " Set " + TABLE_SANITY_CHECKS
        + " to false in the configuration or table descriptor if you want to bypass sanity checks",
        cause);
    }
    LOG.warn(message, cause);
  }

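  /**
   * Runs the given check, deciding between warning and throwing based on whether sanity checks
   * are enabled in the given configuration.
   */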
  private static void warnOrThrowExceptionForFailure(Configuration conf, ThrowingRunnable runnable)
    throws IOException {
    boolean logWarn = !shouldSanityCheck(conf);
    warnOrThrowExceptionForFailure(logWarn, runnable);
  }

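  /**
   * Runs the given check, converting any exception it throws into either a logged warning or a
   * DoNotRetryIOException, depending on {@code logWarn}.
   */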
  private static void warnOrThrowExceptionForFailure(boolean logWarn, ThrowingRunnable runnable)
    throws IOException {
    try {
      runnable.run();
    } catch (Exception e) {
      warnOrThrowExceptionForFailure(logWarn, e.getMessage(), e);
    }
  }

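  /** A {@link Runnable} variant whose body is allowed to throw a checked exception. */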
  @FunctionalInterface
  interface ThrowingRunnable {
    void run() throws Exception;
  }
}