/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.util;

import static org.apache.hadoop.hbase.regionserver.DateTieredStoreEngine.DATE_TIERED_STORE_ENGINE;

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.CompoundConfiguration;
import org.apache.hadoop.hbase.DoNotRetryIOException;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.conf.ConfigKey;
import org.apache.hadoop.hbase.fs.ErasureCodingUtils;
import org.apache.hadoop.hbase.regionserver.DataTieringManager;
import org.apache.hadoop.hbase.regionserver.DataTieringType;
import org.apache.hadoop.hbase.regionserver.DefaultStoreEngine;
import org.apache.hadoop.hbase.regionserver.HStore;
import org.apache.hadoop.hbase.regionserver.RegionCoprocessorHost;
import org.apache.hadoop.hbase.regionserver.RegionSplitPolicy;
import org.apache.hadoop.hbase.regionserver.StoreEngine;
import org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy;
import org.apache.hadoop.hbase.regionserver.compactions.FIFOCompactionPolicy;
import org.apache.yetus.audience.InterfaceAudience;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos;

/**
 * Only used by the master to sanity check {@link org.apache.hadoop.hbase.client.TableDescriptor}.
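 * <p>
 * Illustrative usage sketch (not the master's actual call site; the builder names below are
 * standard HBase client APIs, shown only for context):
 *
 * <pre>
 * Configuration conf = HBaseConfiguration.create();
 * TableDescriptor td = TableDescriptorBuilder.newBuilder(TableName.valueOf("t1"))
 *   .setColumnFamily(ColumnFamilyDescriptorBuilder.of("cf"))
 *   .build();
 * // Throws DoNotRetryIOException on a violation unless hbase.table.sanity.checks is false.
 * TableDescriptorChecker.sanityCheck(conf, td);
 * </pre>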
 */
@InterfaceAudience.Private
public final class TableDescriptorChecker {
  private static final Logger LOG = LoggerFactory.getLogger(TableDescriptorChecker.class);

  public static final String TABLE_SANITY_CHECKS = "hbase.table.sanity.checks";
  public static final boolean DEFAULT_TABLE_SANITY_CHECKS = true;
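
  // Illustrative (sketch, not normative): because sanityCheck() layers table descriptor values
  // over the cluster configuration, the checks can be relaxed either cluster-wide in
  // hbase-site.xml or per table, e.g.
  //   TableDescriptorBuilder.newBuilder(tableName) // "tableName" is a placeholder
  //     .setValue(TABLE_SANITY_CHECKS, "false")
  //     .build();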

  // should we check the compression codec type on the master side? default true, HBASE-6370
  public static final String MASTER_CHECK_COMPRESSION = "hbase.master.check.compression";
  public static final boolean DEFAULT_MASTER_CHECK_COMPRESSION = true;

  // should we check encryption settings on the master side? default true
  public static final String MASTER_CHECK_ENCRYPTION = "hbase.master.check.encryption";
  public static final boolean DEFAULT_MASTER_CHECK_ENCRYPTION = true;

  private TableDescriptorChecker() {
  }

  private static boolean shouldSanityCheck(final Configuration conf) {
    return conf.getBoolean(TABLE_SANITY_CHECKS, DEFAULT_TABLE_SANITY_CHECKS);
  }

  /**
   * Checks whether the table conforms to some sane limits, and whether the configured values
   * (compression, encryption, etc.) are usable. Throws an exception if something is wrong.
   */
  public static void sanityCheck(final Configuration c, final TableDescriptor td)
    throws IOException {
    CompoundConfiguration conf = new CompoundConfiguration().add(c).addBytesMap(td.getValues());

    // Log a warning instead of throwing an exception if sanity checks are disabled
    boolean logWarn = !shouldSanityCheck(conf);

    // Check value types
    warnOrThrowExceptionForFailure(logWarn, () -> ConfigKey.validate(conf));
    warnOrThrowExceptionForFailure(logWarn, () -> {
      for (ColumnFamilyDescriptor cfd : td.getColumnFamilies()) {
        ConfigKey.validate(new CompoundConfiguration().addStringMap(cfd.getConfiguration())
          .addBytesMap(cfd.getValues()));
      }
    });

    // check max file size
    long maxFileSizeLowerLimit = 2 * 1024 * 1024L; // 2M is the default lower limit
    // If MAX_FILESIZE is not set in the table descriptor and HREGION_MAX_FILESIZE is not set in
    // hbase-site.xml, fall back to maxFileSizeLowerLimit so the check below passes trivially.
    long maxFileSize = td.getValue(TableDescriptorBuilder.MAX_FILESIZE) == null
      ? conf.getLong(HConstants.HREGION_MAX_FILESIZE, maxFileSizeLowerLimit)
      : Long.parseLong(td.getValue(TableDescriptorBuilder.MAX_FILESIZE));
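    // Illustrative: if the descriptor sets MAX_FILESIZE to "1048576" (1 MB), maxFileSize resolves
    // to 1 MB and trips the 2 MB floor below; if neither the descriptor nor hbase-site.xml sets a
    // value, maxFileSize falls back to the floor itself and the check passes trivially.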
    if (maxFileSize < conf.getLong("hbase.hregion.max.filesize.limit", maxFileSizeLowerLimit)) {
      String message = "MAX_FILESIZE for table descriptor or \"hbase.hregion.max.filesize\" ("
        + maxFileSize + ") is too small, which might cause over-splitting into an unmanageable "
        + "number of regions.";
      warnOrThrowExceptionForFailure(logWarn, message, null);
    }

    // check flush size
    long flushSizeLowerLimit = 1024 * 1024L; // 1M is the default lower limit
    // If MEMSTORE_FLUSHSIZE is not set in the table descriptor and HREGION_MEMSTORE_FLUSH_SIZE is
    // not set in hbase-site.xml, fall back to flushSizeLowerLimit so the check below passes
    // trivially.
    long flushSize = td.getValue(TableDescriptorBuilder.MEMSTORE_FLUSHSIZE) == null
      ? conf.getLong(HConstants.HREGION_MEMSTORE_FLUSH_SIZE, flushSizeLowerLimit)
      : Long.parseLong(td.getValue(TableDescriptorBuilder.MEMSTORE_FLUSHSIZE));
    if (flushSize < conf.getLong("hbase.hregion.memstore.flush.size.limit", flushSizeLowerLimit)) {
      String message =
        "MEMSTORE_FLUSHSIZE for table descriptor or \"hbase.hregion.memstore.flush.size\" ("
          + flushSize + ") is too small, which might cause very frequent flushing.";
      warnOrThrowExceptionForFailure(logWarn, message, null);
    }

    // check that coprocessors and other specified plugin classes can be loaded
    checkClassLoading(conf, td);

    if (conf.getBoolean(MASTER_CHECK_COMPRESSION, DEFAULT_MASTER_CHECK_COMPRESSION)) {
      // check compression can be loaded
      checkCompression(conf, td);
    }

    if (conf.getBoolean(MASTER_CHECK_ENCRYPTION, DEFAULT_MASTER_CHECK_ENCRYPTION)) {
      // check encryption can be loaded
      checkEncryption(conf, td);
    }

    // Verify compaction policy
    checkCompactionPolicy(conf, td);
    // check that we have at least 1 CF
    if (td.getColumnFamilyCount() == 0) {
      String message = "Table should have at least one column family.";
      warnOrThrowExceptionForFailure(logWarn, message, null);
    }

    // check that we have at least one region replica
    int regionReplicas = td.getRegionReplication();
    if (regionReplicas < 1) {
      String message = "Table region replication should be at least one.";
      warnOrThrowExceptionForFailure(logWarn, message, null);
    }

    // Meta table shouldn't be set as read only, otherwise it will impact region assignments
    if (td.isReadOnly() && TableName.isMetaTableName(td.getTableName())) {
      warnOrThrowExceptionForFailure(false, "Meta table can't be set as read only.", null);
    }

    // check replication scope
    checkReplicationScope(conf, td);

    // check bloom filter type
    checkBloomFilterType(conf, td);

    if (td.getErasureCodingPolicy() != null) {
      warnOrThrowExceptionForFailure(logWarn,
        () -> ErasureCodingUtils.verifySupport(conf, td.getErasureCodingPolicy()));
    }

    for (ColumnFamilyDescriptor hcd : td.getColumnFamilies()) {
      if (hcd.getTimeToLive() <= 0) {
        String message = "TTL for column family " + hcd.getNameAsString() + " must be positive.";
        warnOrThrowExceptionForFailure(logWarn, message, null);
      }

      // check blockSize
      if (hcd.getBlocksize() < 1024 || hcd.getBlocksize() > 16 * 1024 * 1024) {
        String message = "Block size for column family " + hcd.getNameAsString()
          + " must be between 1KB and 16MB.";
        warnOrThrowExceptionForFailure(logWarn, message, null);
      }

      // check versions
      if (hcd.getMinVersions() < 0) {
        String message =
          "Min versions for column family " + hcd.getNameAsString() + " must be non-negative.";
        warnOrThrowExceptionForFailure(logWarn, message, null);
      }
      // max versions is already checked elsewhere

      // HBASE-13776 Setting illegal versions for ColumnFamilyDescriptor
      // does not throw IllegalArgumentException
      // check minVersions <= maxVersions
      if (hcd.getMinVersions() > hcd.getMaxVersions()) {
        String message = "Min versions for column family " + hcd.getNameAsString()
          + " must not exceed the Max versions.";
        warnOrThrowExceptionForFailure(logWarn, message, null);
      }

      // check data replication factor, it can be 0 (the default) when the user has not
      // explicitly set the value, in which case the file system's default replication factor
      // is used.
      if (hcd.getDFSReplication() < 0) {
        String message = "HFile Replication for column family " + hcd.getNameAsString()
          + " must be non-negative.";
        warnOrThrowExceptionForFailure(logWarn, message, null);
      }

      // check in-memory compaction
      warnOrThrowExceptionForFailure(logWarn, hcd::getInMemoryCompaction);
    }

    // Check that TIME_RANGE data tiering is paired with date tiered compaction. This validates
    // both the table-level and every column-family-level configuration, so it only needs to run
    // once rather than once per column family.
    checkDateTieredCompactionForTimeRangeDataTiering(conf, td);
  }

  private static void checkReplicationScope(final Configuration conf, final TableDescriptor td)
    throws IOException {
    warnOrThrowExceptionForFailure(conf, () -> {
      for (ColumnFamilyDescriptor cfd : td.getColumnFamilies()) {
        // check replication scope
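        // Illustrative: valid numeric scopes are the WALProtos.ScopeType values, e.g.
        // 0 (REPLICATION_SCOPE_LOCAL, not replicated) and 1 (REPLICATION_SCOPE_GLOBAL,
        // replicated); ScopeType.valueOf returns null for any other number, which is what the
        // null check below rejects.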
        WALProtos.ScopeType scope = WALProtos.ScopeType.valueOf(cfd.getScope());
        if (scope == null) {
          String message = "Replication scope for column family " + cfd.getNameAsString() + " is "
            + cfd.getScope() + " which is invalid.";
          throw new DoNotRetryIOException(message);
        }
      }
    });
  }

  private static void checkDateTieredCompactionForTimeRangeDataTiering(final Configuration conf,
    final TableDescriptor td) throws IOException {
    // Table level configurations
    checkDateTieredCompactionForTimeRangeDataTiering(conf);
    for (ColumnFamilyDescriptor cfd : td.getColumnFamilies()) {
      // Column family level configurations
      Configuration cfdConf =
        new CompoundConfiguration().add(conf).addStringMap(cfd.getConfiguration());
      checkDateTieredCompactionForTimeRangeDataTiering(cfdConf);
    }
  }

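  // Illustrative pairing (sketch; "builder" is a placeholder TableDescriptorBuilder): a
  // descriptor that opts into TIME_RANGE data tiering must also select the date tiered store
  // engine, e.g.
  //   builder.setValue(StoreEngine.STORE_ENGINE_CLASS_KEY, DATE_TIERED_STORE_ENGINE)
  //     .setValue(DataTieringManager.DATATIERING_KEY, DataTieringType.TIME_RANGE.name());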
  private static void checkDateTieredCompactionForTimeRangeDataTiering(final Configuration conf)
    throws IOException {
    final String errorMessage =
      "Time Range Data Tiering should be enabled with Date Tiered Compaction.";

    warnOrThrowExceptionForFailure(false, () -> {
      // If Time Range Data Tiering is enabled, the date tiered store engine (and with it Date
      // Tiered Compaction) must be the engine in effect after the configuration change.
      if (DataTieringType.TIME_RANGE.name().equals(conf.get(DataTieringManager.DATATIERING_KEY))) {
        if (!DATE_TIERED_STORE_ENGINE.equals(conf.get(StoreEngine.STORE_ENGINE_CLASS_KEY))) {
          throw new IllegalArgumentException(errorMessage);
        }
      }
    });
  }

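  // Illustrative FIFO-friendly column family (sketch; the family name and TTL are arbitrary):
  // FIFO compaction requires an explicit TTL, MIN_VERSIONS == 0, and a blocking store file count
  // of at least 1000, e.g.
  //   ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("cf"))
  //     .setTimeToLive(86400) // one day; must not be left at DEFAULT_TTL
  //     .setMinVersions(0)
  //     .setConfiguration(HStore.BLOCKING_STOREFILES_KEY, "1000")
  //     .build();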
  private static void checkCompactionPolicy(final Configuration conf, final TableDescriptor td)
    throws IOException {
    warnOrThrowExceptionForFailure(false, () -> {
      // FIFO compaction has some requirements; note that FIFOCompactionPolicy ignores periodic
      // major compactions
      String className = td.getValue(DefaultStoreEngine.DEFAULT_COMPACTION_POLICY_CLASS_KEY);
      if (className == null) {
        className = conf.get(DefaultStoreEngine.DEFAULT_COMPACTION_POLICY_CLASS_KEY,
          ExploringCompactionPolicy.class.getName());
      }

      int blockingFileCount = HStore.DEFAULT_BLOCKING_STOREFILE_COUNT;
      String sv = td.getValue(HStore.BLOCKING_STOREFILES_KEY);
      if (sv != null) {
        blockingFileCount = Integer.parseInt(sv);
      } else {
        blockingFileCount = conf.getInt(HStore.BLOCKING_STOREFILES_KEY, blockingFileCount);
      }

      for (ColumnFamilyDescriptor hcd : td.getColumnFamilies()) {
        String compactionPolicy =
          hcd.getConfigurationValue(DefaultStoreEngine.DEFAULT_COMPACTION_POLICY_CLASS_KEY);
        if (compactionPolicy == null) {
          compactionPolicy = className;
        }
        if (!compactionPolicy.equals(FIFOCompactionPolicy.class.getName())) {
          continue;
        }
        // FIFOCompaction requirements

        // 1. Check TTL
        if (hcd.getTimeToLive() == ColumnFamilyDescriptorBuilder.DEFAULT_TTL) {
          throw new IOException("Default TTL is not supported for FIFO compaction");
        }

        // 2. Check min versions
        if (hcd.getMinVersions() > 0) {
          throw new IOException("MIN_VERSIONS > 0 is not supported for FIFO compaction");
        }

        // 3. Check blocking file count
        sv = hcd.getConfigurationValue(HStore.BLOCKING_STOREFILES_KEY);
        if (sv != null) {
          blockingFileCount = Integer.parseInt(sv);
        }
        if (blockingFileCount < 1000) {
          throw new IOException(
            "Blocking file count '" + HStore.BLOCKING_STOREFILES_KEY + "' " + blockingFileCount
              + " is below recommended minimum of 1000 for column family " + hcd.getNameAsString());
        }
      }
    });
  }

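  // Illustrative: parameterized bloom types are the usual trigger here; for example,
  // BloomType.ROWPREFIX_FIXED_LENGTH requires a prefix length in the column family
  // configuration, and a missing or unparsable value makes getBloomFilterParam throw.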
  private static void checkBloomFilterType(final Configuration conf, final TableDescriptor td)
    throws IOException {
    warnOrThrowExceptionForFailure(conf, () -> {
      for (ColumnFamilyDescriptor cfd : td.getColumnFamilies()) {
        Configuration cfdConf = new CompoundConfiguration().addStringMap(cfd.getConfiguration());
        try {
          BloomFilterUtil.getBloomFilterParam(cfd.getBloomFilterType(), cfdConf);
        } catch (IllegalArgumentException e) {
          throw new DoNotRetryIOException("Failed to get bloom filter param", e);
        }
      }
    });
  }

  public static void checkCompression(final Configuration conf, final TableDescriptor td)
    throws IOException {
    warnOrThrowExceptionForFailure(conf, () -> {
      for (ColumnFamilyDescriptor cfd : td.getColumnFamilies()) {
        CompressionTest.testCompression(cfd.getCompressionType());
        CompressionTest.testCompression(cfd.getCompactionCompressionType());
        CompressionTest.testCompression(cfd.getMajorCompactionCompressionType());
        CompressionTest.testCompression(cfd.getMinorCompactionCompressionType());
      }
    });
  }

  public static void checkEncryption(final Configuration conf, final TableDescriptor td)
    throws IOException {
    warnOrThrowExceptionForFailure(conf, () -> {
      for (ColumnFamilyDescriptor cfd : td.getColumnFamilies()) {
        EncryptionTest.testEncryption(conf, cfd.getEncryptionType(), cfd.getEncryptionKey());
      }
    });
  }

  public static void checkClassLoading(final Configuration conf, final TableDescriptor td)
    throws IOException {
    warnOrThrowExceptionForFailure(conf, () -> {
      RegionSplitPolicy.getSplitPolicyClass(td, conf);
      RegionCoprocessorHost.testTableCoprocessorAttrs(conf, td);
    });
  }

  // HBASE-13350 - Helper method to log a warning on sanity check failures when checks are
  // disabled; throws DoNotRetryIOException otherwise.
  private static void warnOrThrowExceptionForFailure(boolean logWarn, String message,
    Exception cause) throws IOException {
    if (!logWarn) {
      throw new DoNotRetryIOException(message + " Set " + TABLE_SANITY_CHECKS
        + " to false in the configuration or table descriptor if you want to bypass sanity checks",
        cause);
    }
    LOG.warn(message);
  }

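  // Illustrative call shape (sketch; "somethingInvalid" is a placeholder, not a real field):
  // validation logic is handed over as a ThrowingRunnable, so any exception it throws is either
  // rethrown as a DoNotRetryIOException or downgraded to a WARN log, e.g.
  //   warnOrThrowExceptionForFailure(conf, () -> {
  //     if (somethingInvalid) {
  //       throw new DoNotRetryIOException("reason");
  //     }
  //   });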
  private static void warnOrThrowExceptionForFailure(Configuration conf, ThrowingRunnable runnable)
    throws IOException {
    boolean logWarn = !shouldSanityCheck(conf);
    warnOrThrowExceptionForFailure(logWarn, runnable);
  }

  private static void warnOrThrowExceptionForFailure(boolean logWarn, ThrowingRunnable runnable)
    throws IOException {
    try {
      runnable.run();
    } catch (Exception e) {
      warnOrThrowExceptionForFailure(logWarn, e.getMessage(), e);
    }
  }

  @FunctionalInterface
  interface ThrowingRunnable {
    void run() throws Exception;
  }
}