/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.util;

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.CompoundConfiguration;
import org.apache.hadoop.hbase.DoNotRetryIOException;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.fs.ErasureCodingUtils;
import org.apache.hadoop.hbase.regionserver.DefaultStoreEngine;
import org.apache.hadoop.hbase.regionserver.HStore;
import org.apache.hadoop.hbase.regionserver.RegionCoprocessorHost;
import org.apache.hadoop.hbase.regionserver.RegionSplitPolicy;
import org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy;
import org.apache.hadoop.hbase.regionserver.compactions.FIFOCompactionPolicy;
import org.apache.yetus.audience.InterfaceAudience;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos;

/**
 * Only used by the master to sanity check {@link org.apache.hadoop.hbase.client.TableDescriptor}.
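 * <p>
 * A minimal usage sketch (assuming a {@code Configuration} and a {@code TableDescriptor} built
 * elsewhere; the table and family names are illustrative):
 *
 * <pre>{@code
 * Configuration conf = HBaseConfiguration.create();
 * TableDescriptor td = TableDescriptorBuilder.newBuilder(TableName.valueOf("example"))
 *   .setColumnFamily(ColumnFamilyDescriptorBuilder.of("cf")).build();
 * TableDescriptorChecker.sanityCheck(conf, td); // throws DoNotRetryIOException on a violation
 * }</pre>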
 */
@InterfaceAudience.Private
public final class TableDescriptorChecker {
  private static final Logger LOG = LoggerFactory.getLogger(TableDescriptorChecker.class);

  public static final String TABLE_SANITY_CHECKS = "hbase.table.sanity.checks";
  public static final boolean DEFAULT_TABLE_SANITY_CHECKS = true;
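
  // These checks can be bypassed cluster-wide or per-table, as the failure message below notes.
  // A sketch (the builder variable is illustrative):
  //   conf.setBoolean(TABLE_SANITY_CHECKS, false); // cluster-wide, e.g. in hbase-site.xml
  //   tableDescriptorBuilder.setValue(TABLE_SANITY_CHECKS, "false"); // per-table override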

  // Should we check the compression codec type at master side? Default true; see HBASE-6370.
  public static final String MASTER_CHECK_COMPRESSION = "hbase.master.check.compression";
  public static final boolean DEFAULT_MASTER_CHECK_COMPRESSION = true;

  // Should we check encryption settings at master side? Default true.
  public static final String MASTER_CHECK_ENCRYPTION = "hbase.master.check.encryption";
  public static final boolean DEFAULT_MASTER_CHECK_ENCRYPTION = true;

  private TableDescriptorChecker() {
  }

  private static boolean shouldSanityCheck(final Configuration conf) {
    return conf.getBoolean(TABLE_SANITY_CHECKS, DEFAULT_TABLE_SANITY_CHECKS);
  }

  /**
   * Checks that the table descriptor conforms to sane limits and that its configured values
   * (compression, encryption, etc.) actually work. Throws an exception if something is wrong.
   */
  public static void sanityCheck(final Configuration c, final TableDescriptor td)
    throws IOException {
    CompoundConfiguration conf = new CompoundConfiguration().add(c).addBytesMap(td.getValues());

    // Log a warning instead of throwing an exception if sanity checks are disabled
    boolean logWarn = !shouldSanityCheck(conf);

    // check max file size
    long maxFileSizeLowerLimit = 2 * 1024 * 1024L; // 2MB is the default lower limit
    // If MAX_FILESIZE is not set in the TableDescriptor and HREGION_MAX_FILESIZE is not set in
    // hbase-site.xml, fall back to maxFileSizeLowerLimit so this check passes trivially
    long maxFileSize = td.getValue(TableDescriptorBuilder.MAX_FILESIZE) == null
      ? conf.getLong(HConstants.HREGION_MAX_FILESIZE, maxFileSizeLowerLimit)
      : Long.parseLong(td.getValue(TableDescriptorBuilder.MAX_FILESIZE));
    if (maxFileSize < conf.getLong("hbase.hregion.max.filesize.limit", maxFileSizeLowerLimit)) {
      String message = "MAX_FILESIZE for table descriptor or \"hbase.hregion.max.filesize\" ("
        + maxFileSize + ") is too small, which might cause over-splitting into an unmanageable "
        + "number of regions.";
      warnOrThrowExceptionForFailure(logWarn, message, null);
    }

    // check flush size
    long flushSizeLowerLimit = 1024 * 1024L; // 1MB is the default lower limit
    // If MEMSTORE_FLUSHSIZE is not set in the TableDescriptor and HREGION_MEMSTORE_FLUSH_SIZE is
    // not set in hbase-site.xml, fall back to flushSizeLowerLimit so this check passes trivially
    long flushSize = td.getValue(TableDescriptorBuilder.MEMSTORE_FLUSHSIZE) == null
      ? conf.getLong(HConstants.HREGION_MEMSTORE_FLUSH_SIZE, flushSizeLowerLimit)
      : Long.parseLong(td.getValue(TableDescriptorBuilder.MEMSTORE_FLUSHSIZE));
    if (flushSize < conf.getLong("hbase.hregion.memstore.flush.size.limit", flushSizeLowerLimit)) {
      String message = "MEMSTORE_FLUSHSIZE for table descriptor or "
        + "\"hbase.hregion.memstore.flush.size\" (" + flushSize
        + ") is too small, which might cause very frequent flushing.";
      warnOrThrowExceptionForFailure(logWarn, message, null);
    }

    // check that coprocessors and other specified plugin classes can be loaded
    checkClassLoading(conf, td);

    if (conf.getBoolean(MASTER_CHECK_COMPRESSION, DEFAULT_MASTER_CHECK_COMPRESSION)) {
      // check that compression codecs can be loaded
      checkCompression(conf, td);
    }

    if (conf.getBoolean(MASTER_CHECK_ENCRYPTION, DEFAULT_MASTER_CHECK_ENCRYPTION)) {
      // check that encryption settings can be loaded
      checkEncryption(conf, td);
    }

    // Verify compaction policy
    checkCompactionPolicy(conf, td);
    // check that we have at least 1 CF
    if (td.getColumnFamilyCount() == 0) {
      String message = "Table should have at least one column family.";
      warnOrThrowExceptionForFailure(logWarn, message, null);
    }

    // check that we have at least 1 region replica
    int regionReplicas = td.getRegionReplication();
    if (regionReplicas < 1) {
      String message = "Table region replication should be at least one.";
      warnOrThrowExceptionForFailure(logWarn, message, null);
    }

    // The meta table shouldn't be set as read-only, otherwise it will impact region assignments
    if (td.isReadOnly() && TableName.isMetaTableName(td.getTableName())) {
      warnOrThrowExceptionForFailure(false, "Meta table can't be set as read only.", null);
    }

    // check replication scope
    checkReplicationScope(conf, td);

    // check bloom filter type
    checkBloomFilterType(conf, td);

    if (td.getErasureCodingPolicy() != null) {
      warnOrThrowExceptionForFailure(logWarn,
        () -> ErasureCodingUtils.verifySupport(conf, td.getErasureCodingPolicy()));
    }

    for (ColumnFamilyDescriptor hcd : td.getColumnFamilies()) {
      if (hcd.getTimeToLive() <= 0) {
        String message = "TTL for column family " + hcd.getNameAsString() + " must be positive.";
        warnOrThrowExceptionForFailure(logWarn, message, null);
      }

      // check blockSize
      if (hcd.getBlocksize() < 1024 || hcd.getBlocksize() > 16 * 1024 * 1024) {
        String message = "Block size for column family " + hcd.getNameAsString()
          + " must be between 1KB and 16MB.";
        warnOrThrowExceptionForFailure(logWarn, message, null);
      }

      // check versions
      if (hcd.getMinVersions() < 0) {
        String message =
          "Min versions for column family " + hcd.getNameAsString() + " must be non-negative.";
        warnOrThrowExceptionForFailure(logWarn, message, null);
      }
      // max versions already being checked

      // HBASE-13776 Setting illegal versions for ColumnFamilyDescriptor
      // does not throw IllegalArgumentException
      // check minVersions <= maxVersions
      if (hcd.getMinVersions() > hcd.getMaxVersions()) {
        String message = "Min versions for column family " + hcd.getNameAsString()
          + " must be less than or equal to the max versions.";
        warnOrThrowExceptionForFailure(logWarn, message, null);
      }

      // Check the data replication factor. It can be 0 (the default value) when the user has not
      // explicitly set it, in which case the file system's default replication factor is used.
      if (hcd.getDFSReplication() < 0) {
        String message = "HFile Replication for column family " + hcd.getNameAsString()
          + " must be non-negative.";
        warnOrThrowExceptionForFailure(logWarn, message, null);
      }

      // check in-memory compaction
      warnOrThrowExceptionForFailure(logWarn, hcd::getInMemoryCompaction);
    }
  }

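  /**
   * Checks that every family's replication scope maps to a known {@link WALProtos.ScopeType}. A
   * family with global scope could, for example, be declared like this (a sketch; the family name
   * is illustrative):
   *
   * <pre>{@code
   * ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("cf"))
   *   .setScope(HConstants.REPLICATION_SCOPE_GLOBAL).build();
   * }</pre>
   */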
  private static void checkReplicationScope(final Configuration conf, final TableDescriptor td)
    throws IOException {
    warnOrThrowExceptionForFailure(conf, () -> {
      for (ColumnFamilyDescriptor cfd : td.getColumnFamilies()) {
        // check replication scope
        WALProtos.ScopeType scope = WALProtos.ScopeType.valueOf(cfd.getScope());
        if (scope == null) {
          String message = "Replication scope for column family " + cfd.getNameAsString() + " is "
            + cfd.getScope() + ", which is invalid.";

          throw new DoNotRetryIOException(message);
        }
      }
    });
  }

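  /**
   * Verifies the preconditions for {@link FIFOCompactionPolicy}: a non-default TTL, no minimum
   * versions, and a blocking store file count of at least 1000. A family that passes could be
   * declared like this (a sketch; the values are illustrative):
   *
   * <pre>{@code
   * ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("cf"))
   *   .setTimeToLive(60 * 60 * 24) // the default TTL is rejected
   *   .setMinVersions(0) // MIN_VERSIONS > 0 is rejected
   *   .setConfiguration(DefaultStoreEngine.DEFAULT_COMPACTION_POLICY_CLASS_KEY,
   *     FIFOCompactionPolicy.class.getName())
   *   .setConfiguration(HStore.BLOCKING_STOREFILES_KEY, "1000") // below 1000 is rejected
   *   .build();
   * }</pre>
   */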
  private static void checkCompactionPolicy(final Configuration conf, final TableDescriptor td)
    throws IOException {
    warnOrThrowExceptionForFailure(false, () -> {
      // FIFO compaction has some requirements.
      // Note that FIFOCompactionPolicy ignores periodic major compactions.
      String className = td.getValue(DefaultStoreEngine.DEFAULT_COMPACTION_POLICY_CLASS_KEY);
      if (className == null) {
        className = conf.get(DefaultStoreEngine.DEFAULT_COMPACTION_POLICY_CLASS_KEY,
          ExploringCompactionPolicy.class.getName());
      }

      int blockingFileCount = HStore.DEFAULT_BLOCKING_STOREFILE_COUNT;
      String sv = td.getValue(HStore.BLOCKING_STOREFILES_KEY);
      if (sv != null) {
        blockingFileCount = Integer.parseInt(sv);
      } else {
        blockingFileCount = conf.getInt(HStore.BLOCKING_STOREFILES_KEY, blockingFileCount);
      }

      for (ColumnFamilyDescriptor hcd : td.getColumnFamilies()) {
        String compactionPolicy =
          hcd.getConfigurationValue(DefaultStoreEngine.DEFAULT_COMPACTION_POLICY_CLASS_KEY);
        if (compactionPolicy == null) {
          compactionPolicy = className;
        }
        if (!compactionPolicy.equals(FIFOCompactionPolicy.class.getName())) {
          continue;
        }
        // FIFOCompaction
        String message = null;

        // 1. Check TTL
        if (hcd.getTimeToLive() == ColumnFamilyDescriptorBuilder.DEFAULT_TTL) {
          message = "Default TTL is not supported for FIFO compaction";
          throw new IOException(message);
        }

        // 2. Check min versions
        if (hcd.getMinVersions() > 0) {
          message = "MIN_VERSION > 0 is not supported for FIFO compaction";
          throw new IOException(message);
        }

        // 3. Check blocking file count
        sv = hcd.getConfigurationValue(HStore.BLOCKING_STOREFILES_KEY);
        if (sv != null) {
          blockingFileCount = Integer.parseInt(sv);
        }
        if (blockingFileCount < 1000) {
          message =
            "Blocking file count '" + HStore.BLOCKING_STOREFILES_KEY + "' " + blockingFileCount
              + " is below recommended minimum of 1000 for column family " + hcd.getNameAsString();
          throw new IOException(message);
        }
      }
    });
  }

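  /**
   * Checks that each family's bloom filter type can be parameterized from its configuration. For
   * example, a ROWPREFIX_FIXED_LENGTH bloom filter needs a prefix length, which could be set like
   * this (a sketch; the length is illustrative):
   *
   * <pre>{@code
   * ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("cf"))
   *   .setBloomFilterType(BloomType.ROWPREFIX_FIXED_LENGTH)
   *   .setConfiguration(BloomFilterUtil.PREFIX_LENGTH_KEY, "10").build();
   * }</pre>
   */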
  private static void checkBloomFilterType(final Configuration conf, final TableDescriptor td)
    throws IOException {
    warnOrThrowExceptionForFailure(conf, () -> {
      for (ColumnFamilyDescriptor cfd : td.getColumnFamilies()) {
        Configuration cfdConf = new CompoundConfiguration().addStringMap(cfd.getConfiguration());
        try {
          BloomFilterUtil.getBloomFilterParam(cfd.getBloomFilterType(), cfdConf);
        } catch (IllegalArgumentException e) {
          throw new DoNotRetryIOException("Failed to get bloom filter param", e);
        }
      }
    });
  }

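  /**
   * Verifies that the compression codecs configured for each family, including the
   * compaction-specific ones, can actually be loaded on this host. A codec is configured like
   * this (a sketch; the algorithm is illustrative):
   *
   * <pre>{@code
   * ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("cf"))
   *   .setCompressionType(Compression.Algorithm.SNAPPY).build();
   * }</pre>
   */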
  public static void checkCompression(final Configuration conf, final TableDescriptor td)
    throws IOException {
    warnOrThrowExceptionForFailure(conf, () -> {
      for (ColumnFamilyDescriptor cfd : td.getColumnFamilies()) {
        CompressionTest.testCompression(cfd.getCompressionType());
        CompressionTest.testCompression(cfd.getCompactionCompressionType());
        CompressionTest.testCompression(cfd.getMajorCompactionCompressionType());
        CompressionTest.testCompression(cfd.getMinorCompactionCompressionType());
      }
    });
  }

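  /**
   * Verifies that each family's encryption settings (cipher type and, if present, the wrapped
   * key) are usable with the configured crypto provider. A cipher is configured like this (a
   * sketch; the algorithm is illustrative):
   *
   * <pre>{@code
   * ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("cf"))
   *   .setEncryptionType("AES").build();
   * }</pre>
   */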
  public static void checkEncryption(final Configuration conf, final TableDescriptor td)
    throws IOException {
    warnOrThrowExceptionForFailure(conf, () -> {
      for (ColumnFamilyDescriptor cfd : td.getColumnFamilies()) {
        EncryptionTest.testEncryption(conf, cfd.getEncryptionType(), cfd.getEncryptionKey());
      }
    });
  }

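  /**
   * Verifies that the split policy and coprocessor classes named by the table descriptor can be
   * loaded, e.g. for a descriptor configured like this (a sketch; the class names are
   * illustrative, and org.myorg.MyRegionObserver is hypothetical):
   *
   * <pre>{@code
   * TableDescriptorBuilder.newBuilder(TableName.valueOf("example"))
   *   .setRegionSplitPolicyClassName(
   *     "org.apache.hadoop.hbase.regionserver.ConstantSizeRegionSplitPolicy")
   *   .setCoprocessor("org.myorg.MyRegionObserver")
   *   .setColumnFamily(ColumnFamilyDescriptorBuilder.of("cf")).build();
   * }</pre>
   */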
  public static void checkClassLoading(final Configuration conf, final TableDescriptor td)
    throws IOException {
    warnOrThrowExceptionForFailure(conf, () -> {
      RegionSplitPolicy.getSplitPolicyClass(td, conf);
      RegionCoprocessorHost.testTableCoprocessorAttrs(conf, td);
    });
  }

  // HBASE-13350 - Helper method that logs a warning instead of throwing when sanity checks are
  // disabled.
  private static void warnOrThrowExceptionForFailure(boolean logWarn, String message,
    Exception cause) throws IOException {
    if (!logWarn) {
      throw new DoNotRetryIOException(message + " Set " + TABLE_SANITY_CHECKS
        + " to false in the conf or the table descriptor if you want to bypass sanity checks",
        cause);
    }
    LOG.warn(message);
  }

  private static void warnOrThrowExceptionForFailure(Configuration conf, ThrowingRunnable runnable)
    throws IOException {
    boolean logWarn = !shouldSanityCheck(conf);
    warnOrThrowExceptionForFailure(logWarn, runnable);
  }

  private static void warnOrThrowExceptionForFailure(boolean logWarn, ThrowingRunnable runnable)
    throws IOException {
    try {
      runnable.run();
    } catch (Exception e) {
      warnOrThrowExceptionForFailure(logWarn, e.getMessage(), e);
    }
  }

  @FunctionalInterface
  interface ThrowingRunnable {
    void run() throws Exception;
  }
}