/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.util;

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.CompoundConfiguration;
import org.apache.hadoop.hbase.DoNotRetryIOException;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.regionserver.DefaultStoreEngine;
import org.apache.hadoop.hbase.regionserver.HStore;
import org.apache.hadoop.hbase.regionserver.RegionCoprocessorHost;
import org.apache.hadoop.hbase.regionserver.RegionSplitPolicy;
import org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy;
import org.apache.hadoop.hbase.regionserver.compactions.FIFOCompactionPolicy;
import org.apache.yetus.audience.InterfaceAudience;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos;

/**
 * Only used by the master to sanity check {@link org.apache.hadoop.hbase.client.TableDescriptor}.
 */
@InterfaceAudience.Private
public final class TableDescriptorChecker {
  private static final Logger LOG = LoggerFactory.getLogger(TableDescriptorChecker.class);

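  // should we enforce sanity checks on table descriptors at master side, default true;
  // can be overridden in the configuration or per table in the table descriptor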
  public static final String TABLE_SANITY_CHECKS = "hbase.table.sanity.checks";
  public static final boolean DEFAULT_TABLE_SANITY_CHECKS = true;

  // should we check the compression codec type at master side, default true, HBASE-6370
  public static final String MASTER_CHECK_COMPRESSION = "hbase.master.check.compression";
  public static final boolean DEFAULT_MASTER_CHECK_COMPRESSION = true;

  // should we check encryption settings at master side, default true
  public static final String MASTER_CHECK_ENCRYPTION = "hbase.master.check.encryption";
  public static final boolean DEFAULT_MASTER_CHECK_ENCRYPTION = true;

  private TableDescriptorChecker() {
  }

  /**
   * Checks whether the table conforms to some sane limits and that the configured values
   * (compression, encryption, etc.) are valid. Throws an exception if something is wrong.
   */
  public static void sanityCheck(final Configuration c, final TableDescriptor td)
      throws IOException {
    CompoundConfiguration conf = new CompoundConfiguration()
      .add(c)
      .addBytesMap(td.getValues());

    // Setting this to true logs the warning instead of throwing an exception
    boolean logWarn = false;
    if (!conf.getBoolean(TABLE_SANITY_CHECKS, DEFAULT_TABLE_SANITY_CHECKS)) {
      logWarn = true;
    }
    String tableVal = td.getValue(TABLE_SANITY_CHECKS);
    if (tableVal != null && !Boolean.parseBoolean(tableVal)) {
      logWarn = true;
    }

    // check max file size
    long maxFileSizeLowerLimit = 2 * 1024 * 1024L; // 2M is the default lower limit
    long maxFileSize = td.getMaxFileSize();
    if (maxFileSize < 0) {
      maxFileSize = conf.getLong(HConstants.HREGION_MAX_FILESIZE, maxFileSizeLowerLimit);
    }
    if (maxFileSize < conf.getLong("hbase.hregion.max.filesize.limit", maxFileSizeLowerLimit)) {
      String message = "MAX_FILESIZE for table descriptor or \"hbase.hregion.max.filesize\" (" +
          maxFileSize + ") is too small, which might cause over-splitting into an unmanageable " +
          "number of regions.";
      warnOrThrowExceptionForFailure(logWarn, message, null);
    }

    // check flush size
    long flushSizeLowerLimit = 1024 * 1024L; // 1M is the default lower limit
    long flushSize = td.getMemStoreFlushSize();
    if (flushSize < 0) {
      flushSize = conf.getLong(HConstants.HREGION_MEMSTORE_FLUSH_SIZE, flushSizeLowerLimit);
    }
    if (flushSize < conf.getLong("hbase.hregion.memstore.flush.size.limit", flushSizeLowerLimit)) {
      String message = "MEMSTORE_FLUSHSIZE for table descriptor or " +
          "\"hbase.hregion.memstore.flush.size\" (" + flushSize +
          ") is too small, which might cause very frequent flushing.";
      warnOrThrowExceptionForFailure(logWarn, message, null);
    }

    // check that coprocessors and other specified plugin classes can be loaded
    try {
      checkClassLoading(conf, td);
    } catch (Exception ex) {
      warnOrThrowExceptionForFailure(logWarn, ex.getMessage(), null);
    }

    if (conf.getBoolean(MASTER_CHECK_COMPRESSION, DEFAULT_MASTER_CHECK_COMPRESSION)) {
      // check compression can be loaded
      try {
        checkCompression(td);
      } catch (IOException e) {
        warnOrThrowExceptionForFailure(logWarn, e.getMessage(), e);
      }
    }

    if (conf.getBoolean(MASTER_CHECK_ENCRYPTION, DEFAULT_MASTER_CHECK_ENCRYPTION)) {
      // check encryption can be loaded
      try {
        checkEncryption(conf, td);
      } catch (IOException e) {
        warnOrThrowExceptionForFailure(logWarn, e.getMessage(), e);
      }
    }

    // Verify compaction policy
    try {
      checkCompactionPolicy(conf, td);
    } catch (IOException e) {
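      // Pass false so that a misconfigured compaction policy always throws, even when sanity
      // checks are configured to warn only.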
      warnOrThrowExceptionForFailure(false, e.getMessage(), e);
    }

    // check that we have at least one column family
    if (td.getColumnFamilyCount() == 0) {
      String message = "Table should have at least one column family.";
      warnOrThrowExceptionForFailure(logWarn, message, null);
    }

    // check that we have at least one region replica
    int regionReplicas = td.getRegionReplication();
    if (regionReplicas < 1) {
      String message = "Table region replication should be at least one.";
      warnOrThrowExceptionForFailure(logWarn, message, null);
    }

    for (ColumnFamilyDescriptor hcd : td.getColumnFamilies()) {
      if (hcd.getTimeToLive() <= 0) {
        String message = "TTL for column family " + hcd.getNameAsString() + " must be positive.";
        warnOrThrowExceptionForFailure(logWarn, message, null);
      }

      // check blockSize
      if (hcd.getBlocksize() < 1024 || hcd.getBlocksize() > 16 * 1024 * 1024) {
        String message = "Block size for column family " + hcd.getNameAsString() +
            " must be between 1K and 16MB.";
        warnOrThrowExceptionForFailure(logWarn, message, null);
      }

      // check versions
      if (hcd.getMinVersions() < 0) {
        String message =
            "Min versions for column family " + hcd.getNameAsString() + " must be non-negative.";
        warnOrThrowExceptionForFailure(logWarn, message, null);
      }
      // max versions already being checked

      // HBASE-13776 Setting illegal versions for ColumnFamilyDescriptor
      //  does not throw IllegalArgumentException
      // check minVersions <= maxVersions
      if (hcd.getMinVersions() > hcd.getMaxVersions()) {
        String message = "Min versions for column family " + hcd.getNameAsString() +
            " must be less than or equal to the max versions.";
        warnOrThrowExceptionForFailure(logWarn, message, null);
      }

      // check replication scope
      checkReplicationScope(hcd);
      // check bloom filter type
      checkBloomFilterType(hcd);

      // Check the data replication factor. It can be 0 (the default) when the user has not
      // explicitly set a value, in which case the file system's default replication factor is
      // used.
      if (hcd.getDFSReplication() < 0) {
        String message = "HFile replication for column family " + hcd.getNameAsString() +
            " must be non-negative.";
        warnOrThrowExceptionForFailure(logWarn, message, null);
      }

      // check in-memory compaction
      try {
        hcd.getInMemoryCompaction();
      } catch (IllegalArgumentException e) {
        warnOrThrowExceptionForFailure(logWarn, e.getMessage(), e);
      }
    }
  }

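  /**
   * Checks that the column family's replication scope maps to a valid
   * {@link WALProtos.ScopeType}, and throws a {@link DoNotRetryIOException} if it does not.
   */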
  private static void checkReplicationScope(final ColumnFamilyDescriptor cfd) throws IOException {
    WALProtos.ScopeType scope = WALProtos.ScopeType.valueOf(cfd.getScope());
    if (scope == null) {
      String message = "Replication scope for column family " + cfd.getNameAsString() + " is " +
          cfd.getScope() + ", which is invalid.";
      LOG.error(message);
      throw new DoNotRetryIOException(message);
    }
  }

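  /**
   * Verifies the prerequisites of {@link FIFOCompactionPolicy} for every column family that uses
   * it: a non-default TTL, no minimum versions, and a blocking store file count of at least 1000.
   */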
  private static void checkCompactionPolicy(Configuration conf, TableDescriptor td)
      throws IOException {
    // FIFO compaction has some requirements
    // Note that FIFO compaction ignores periodic major compactions
    String className = td.getValue(DefaultStoreEngine.DEFAULT_COMPACTION_POLICY_CLASS_KEY);
    if (className == null) {
      className = conf.get(DefaultStoreEngine.DEFAULT_COMPACTION_POLICY_CLASS_KEY,
          ExploringCompactionPolicy.class.getName());
    }

    int blockingFileCount = HStore.DEFAULT_BLOCKING_STOREFILE_COUNT;
    String sv = td.getValue(HStore.BLOCKING_STOREFILES_KEY);
    if (sv != null) {
      blockingFileCount = Integer.parseInt(sv);
    } else {
      blockingFileCount = conf.getInt(HStore.BLOCKING_STOREFILES_KEY, blockingFileCount);
    }

    for (ColumnFamilyDescriptor hcd : td.getColumnFamilies()) {
      String compactionPolicy =
          hcd.getConfigurationValue(DefaultStoreEngine.DEFAULT_COMPACTION_POLICY_CLASS_KEY);
      if (compactionPolicy == null) {
        compactionPolicy = className;
      }
      if (!compactionPolicy.equals(FIFOCompactionPolicy.class.getName())) {
        continue;
      }
      // FIFOCompaction
      String message = null;

      // 1. Check TTL
      if (hcd.getTimeToLive() == ColumnFamilyDescriptorBuilder.DEFAULT_TTL) {
        message = "Default TTL is not supported for FIFO compaction";
        throw new IOException(message);
      }

      // 2. Check min versions
      if (hcd.getMinVersions() > 0) {
        message = "MIN_VERSIONS > 0 is not supported for FIFO compaction";
        throw new IOException(message);
      }

      // 3. Check blocking file count
      sv = hcd.getConfigurationValue(HStore.BLOCKING_STOREFILES_KEY);
      if (sv != null) {
        blockingFileCount = Integer.parseInt(sv);
      }
      if (blockingFileCount < 1000) {
        message =
            "Blocking file count '" + HStore.BLOCKING_STOREFILES_KEY + "' " + blockingFileCount +
                " is below the recommended minimum of 1000 for column family " +
                hcd.getNameAsString();
        throw new IOException(message);
      }
    }
  }

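  /**
   * Checks that valid bloom filter parameters can be derived from the column family's bloom
   * filter type and configuration.
   */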
  private static void checkBloomFilterType(ColumnFamilyDescriptor cfd) throws IOException {
    Configuration conf = new CompoundConfiguration().addStringMap(cfd.getConfiguration());
    try {
      BloomFilterUtil.getBloomFilterParam(cfd.getBloomFilterType(), conf);
    } catch (IllegalArgumentException e) {
      throw new DoNotRetryIOException("Failed to get bloom filter param", e);
    }
  }

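  /**
   * Checks that the compression codecs configured for each column family, for both flushes and
   * compactions, can be loaded.
   */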
  public static void checkCompression(final TableDescriptor td) throws IOException {
    for (ColumnFamilyDescriptor cfd : td.getColumnFamilies()) {
      CompressionTest.testCompression(cfd.getCompressionType());
      CompressionTest.testCompression(cfd.getCompactionCompressionType());
    }
  }

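  /**
   * Checks that the encryption cipher and key configured for each column family can be loaded.
   */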
  public static void checkEncryption(final Configuration conf, final TableDescriptor td)
      throws IOException {
    for (ColumnFamilyDescriptor cfd : td.getColumnFamilies()) {
      EncryptionTest.testEncryption(conf, cfd.getEncryptionType(), cfd.getEncryptionKey());
    }
  }

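  /**
   * Checks that the configured region split policy and table coprocessor classes can be loaded.
   */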
  public static void checkClassLoading(final Configuration conf, final TableDescriptor td)
      throws IOException {
    RegionSplitPolicy.getSplitPolicyClass(td, conf);
    RegionCoprocessorHost.testTableCoprocessorAttrs(conf, td);
  }

  // HBASE-13350 - Helper method to log a warning on sanity-check failures when checks are
  // disabled.
  private static void warnOrThrowExceptionForFailure(boolean logWarn, String message,
      Exception cause) throws IOException {
    if (!logWarn) {
      throw new DoNotRetryIOException(message + " Set " + TABLE_SANITY_CHECKS +
          " to false in the configuration or table descriptor if you want to bypass sanity " +
          "checks", cause);
    }
    LOG.warn(message);
  }
}