/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.util;

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.CompoundConfiguration;
import org.apache.hadoop.hbase.DoNotRetryIOException;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.regionserver.DefaultStoreEngine;
import org.apache.hadoop.hbase.regionserver.HStore;
import org.apache.hadoop.hbase.regionserver.RegionCoprocessorHost;
import org.apache.hadoop.hbase.regionserver.RegionSplitPolicy;
import org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy;
import org.apache.hadoop.hbase.regionserver.compactions.FIFOCompactionPolicy;
import org.apache.yetus.audience.InterfaceAudience;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos;

/**
 * Used only by the master to sanity check a
 * {@link org.apache.hadoop.hbase.client.TableDescriptor}.
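 * <p>
 * A minimal usage sketch (illustrative only; the master invokes this when creating or modifying
 * a table):
 * <pre>
 * // Throws DoNotRetryIOException on a violation unless "hbase.table.sanity.checks"
 * // is set to false in the configuration or in the table descriptor itself.
 * TableDescriptorChecker.sanityCheck(conf, tableDescriptor);
 * </pre>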
 */
@InterfaceAudience.Private
public final class TableDescriptorChecker {
  private static final Logger LOG = LoggerFactory.getLogger(TableDescriptorChecker.class);

  public static final String TABLE_SANITY_CHECKS = "hbase.table.sanity.checks";
  public static final boolean DEFAULT_TABLE_SANITY_CHECKS = true;

  // Should we check the compression codec type on the master side? Default true (HBASE-6370).
  public static final String MASTER_CHECK_COMPRESSION = "hbase.master.check.compression";
  public static final boolean DEFAULT_MASTER_CHECK_COMPRESSION = true;

  // Should we check encryption settings on the master side? Default true.
  public static final String MASTER_CHECK_ENCRYPTION = "hbase.master.check.encryption";
  public static final boolean DEFAULT_MASTER_CHECK_ENCRYPTION = true;

  private TableDescriptorChecker() {
  }

  /**
   * Checks whether the table descriptor conforms to sane limits and that its configured values
   * (compression codecs, encryption, coprocessors, etc.) can actually be loaded. Throws a
   * {@link DoNotRetryIOException} on failure unless sanity checks are disabled.
   */
  public static void sanityCheck(final Configuration c, final TableDescriptor td)
      throws IOException {
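    // Merge the site configuration with the table descriptor's own values; the table-level
    // values take precedence for the lookups below.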
    CompoundConfiguration conf = new CompoundConfiguration()
      .add(c)
      .addBytesMap(td.getValues());

    // When true, sanity-check failures are logged as warnings instead of throwing an exception
    boolean logWarn = false;
    if (!conf.getBoolean(TABLE_SANITY_CHECKS, DEFAULT_TABLE_SANITY_CHECKS)) {
      logWarn = true;
    }
    String tableVal = td.getValue(TABLE_SANITY_CHECKS);
    if (tableVal != null && !Boolean.parseBoolean(tableVal)) {
      logWarn = true;
    }

    // check max file size
    long maxFileSizeLowerLimit = 2 * 1024 * 1024L; // 2M is the default lower limit
    long maxFileSize = td.getMaxFileSize();
    if (maxFileSize < 0) {
      maxFileSize = conf.getLong(HConstants.HREGION_MAX_FILESIZE, maxFileSizeLowerLimit);
    }
    if (maxFileSize < conf.getLong("hbase.hregion.max.filesize.limit", maxFileSizeLowerLimit)) {
      String message = "MAX_FILESIZE for table descriptor or \"hbase.hregion.max.filesize\" (" +
          maxFileSize + ") is too small, which might cause over-splitting into an unmanageable " +
          "number of regions.";
      warnOrThrowExceptionForFailure(logWarn, message, null);
    }

    // check flush size
    long flushSizeLowerLimit = 1024 * 1024L; // 1M is the default lower limit
    long flushSize = td.getMemStoreFlushSize();
    if (flushSize < 0) {
      flushSize = conf.getLong(HConstants.HREGION_MEMSTORE_FLUSH_SIZE, flushSizeLowerLimit);
    }
    if (flushSize < conf.getLong("hbase.hregion.memstore.flush.size.limit", flushSizeLowerLimit)) {
      String message = "MEMSTORE_FLUSHSIZE for table descriptor or " +
          "\"hbase.hregion.memstore.flush.size\" (" + flushSize +
          ") is too small, which might cause very frequent flushing.";
      warnOrThrowExceptionForFailure(logWarn, message, null);
    }

    // check that coprocessors and other specified plugin classes can be loaded
    try {
      checkClassLoading(conf, td);
    } catch (Exception ex) {
      warnOrThrowExceptionForFailure(logWarn, ex.getMessage(), null);
    }

    if (conf.getBoolean(MASTER_CHECK_COMPRESSION, DEFAULT_MASTER_CHECK_COMPRESSION)) {
      // check compression can be loaded
      try {
        checkCompression(td);
      } catch (IOException e) {
        warnOrThrowExceptionForFailure(logWarn, e.getMessage(), e);
      }
    }

    if (conf.getBoolean(MASTER_CHECK_ENCRYPTION, DEFAULT_MASTER_CHECK_ENCRYPTION)) {
      // check encryption can be loaded
      try {
        checkEncryption(conf, td);
      } catch (IOException e) {
        warnOrThrowExceptionForFailure(logWarn, e.getMessage(), e);
      }
    }

    // Verify compaction policy. Note that logWarn is intentionally false here: FIFO compaction
    // misconfiguration always throws, even when sanity checks are otherwise disabled.
    try {
      checkCompactionPolicy(conf, td);
    } catch (IOException e) {
      warnOrThrowExceptionForFailure(false, e.getMessage(), e);
    }
    // check that we have at least 1 CF
    if (td.getColumnFamilyCount() == 0) {
      String message = "Table should have at least one column family.";
      warnOrThrowExceptionForFailure(logWarn, message, null);
    }

    // check that we have at least one region replica
    int regionReplicas = td.getRegionReplication();
    if (regionReplicas < 1) {
      String message = "Table region replication should be at least one.";
      warnOrThrowExceptionForFailure(logWarn, message, null);
    }

    // The meta table shouldn't be set read-only; that would impact region assignments
    if (td.isReadOnly() && TableName.isMetaTableName(td.getTableName())) {
      warnOrThrowExceptionForFailure(false, "Meta table can't be set as read only.", null);
    }

    for (ColumnFamilyDescriptor hcd : td.getColumnFamilies()) {
      if (hcd.getTimeToLive() <= 0) {
        String message = "TTL for column family " + hcd.getNameAsString() + " must be positive.";
        warnOrThrowExceptionForFailure(logWarn, message, null);
      }

      // check blockSize
      if (hcd.getBlocksize() < 1024 || hcd.getBlocksize() > 16 * 1024 * 1024) {
        String message = "Block size for column family " + hcd.getNameAsString() +
            " must be between 1K and 16MB.";
        warnOrThrowExceptionForFailure(logWarn, message, null);
      }

      // check versions
      if (hcd.getMinVersions() < 0) {
        String message =
            "Min versions for column family " + hcd.getNameAsString() + " must be non-negative.";
        warnOrThrowExceptionForFailure(logWarn, message, null);
      }
      // max versions is already checked elsewhere

      // HBASE-13776 Setting illegal versions for ColumnFamilyDescriptor
      //  does not throw IllegalArgumentException
      // check minVersions <= maxVersions
      if (hcd.getMinVersions() > hcd.getMaxVersions()) {
        String message = "Min versions for column family " + hcd.getNameAsString() +
            " must not be greater than the max versions.";
        warnOrThrowExceptionForFailure(logWarn, message, null);
      }

      // check replication scope
      checkReplicationScope(hcd);
      // check bloom filter type
      checkBloomFilterType(hcd);

      // check data replication factor; it can be 0 (the default) when the user has not
      // explicitly set a value, in which case the file system's default replication factor
      // is used.
      if (hcd.getDFSReplication() < 0) {
        String message = "HFile Replication for column family " + hcd.getNameAsString() +
            " must be non-negative.";
        warnOrThrowExceptionForFailure(logWarn, message, null);
      }

      // check in-memory compaction
      try {
        hcd.getInMemoryCompaction();
      } catch (IllegalArgumentException e) {
        warnOrThrowExceptionForFailure(logWarn, e.getMessage(), e);
      }
    }
  }

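  /**
   * Rejects a column family whose replication scope does not map to a known
   * {@link WALProtos.ScopeType}.
   */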
  private static void checkReplicationScope(final ColumnFamilyDescriptor cfd) throws IOException {
    WALProtos.ScopeType scope = WALProtos.ScopeType.valueOf(cfd.getScope());
    if (scope == null) {
      String message = "Replication scope for column family " + cfd.getNameAsString() + " is " +
          cfd.getScope() + ", which is invalid.";
      LOG.error(message);
      throw new DoNotRetryIOException(message);
    }
  }

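  /**
   * Enforces the prerequisites of {@link FIFOCompactionPolicy} for every column family configured
   * to use it: a non-default TTL, no min versions, and a blocking store file count of at least
   * 1000. Violations always throw, regardless of the sanity-check toggle.
   */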
  private static void checkCompactionPolicy(Configuration conf, TableDescriptor td)
      throws IOException {
    // FIFO compaction has some requirements that we enforce below.
    // Note that FIFOCompactionPolicy ignores periodic major compactions.
    String className = td.getValue(DefaultStoreEngine.DEFAULT_COMPACTION_POLICY_CLASS_KEY);
    if (className == null) {
      className = conf.get(DefaultStoreEngine.DEFAULT_COMPACTION_POLICY_CLASS_KEY,
          ExploringCompactionPolicy.class.getName());
    }

    int blockingFileCount = conf.getInt(HStore.BLOCKING_STOREFILES_KEY,
        HStore.DEFAULT_BLOCKING_STOREFILE_COUNT);
    String sv = td.getValue(HStore.BLOCKING_STOREFILES_KEY);
    if (sv != null) {
      blockingFileCount = Integer.parseInt(sv);
    }

    for (ColumnFamilyDescriptor hcd : td.getColumnFamilies()) {
      String compactionPolicy =
          hcd.getConfigurationValue(DefaultStoreEngine.DEFAULT_COMPACTION_POLICY_CLASS_KEY);
      if (compactionPolicy == null) {
        compactionPolicy = className;
      }
      if (!compactionPolicy.equals(FIFOCompactionPolicy.class.getName())) {
        continue;
      }
      // FIFOCompaction checks:
      // 1. Check TTL
      if (hcd.getTimeToLive() == ColumnFamilyDescriptorBuilder.DEFAULT_TTL) {
        throw new IOException("Default TTL is not supported for FIFO compaction");
      }

      // 2. Check min versions
      if (hcd.getMinVersions() > 0) {
        throw new IOException("MIN_VERSION > 0 is not supported for FIFO compaction");
      }

      // 3. Check blocking file count; a column-family-level setting overrides the table/site
      // level value, but only for this family.
      int cfBlockingFileCount = blockingFileCount;
      sv = hcd.getConfigurationValue(HStore.BLOCKING_STOREFILES_KEY);
      if (sv != null) {
        cfBlockingFileCount = Integer.parseInt(sv);
      }
      if (cfBlockingFileCount < 1000) {
        String message = "Blocking file count '" + HStore.BLOCKING_STOREFILES_KEY + "' " +
            cfBlockingFileCount + " is below recommended minimum of 1000 for column family " +
            hcd.getNameAsString();
        throw new IOException(message);
      }
    }
  }

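  /**
   * Verifies that the column family's bloom filter type and any associated parameters in its
   * configuration are valid.
   */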
  private static void checkBloomFilterType(ColumnFamilyDescriptor cfd) throws IOException {
    Configuration conf = new CompoundConfiguration().addStringMap(cfd.getConfiguration());
    try {
      BloomFilterUtil.getBloomFilterParam(cfd.getBloomFilterType(), conf);
    } catch (IllegalArgumentException e) {
      throw new DoNotRetryIOException("Failed to get bloom filter param", e);
    }
  }

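  /**
   * Verifies that the compression codecs configured for flushes and compactions can be loaded.
   */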
  public static void checkCompression(final TableDescriptor td) throws IOException {
    for (ColumnFamilyDescriptor cfd : td.getColumnFamilies()) {
      CompressionTest.testCompression(cfd.getCompressionType());
      CompressionTest.testCompression(cfd.getCompactionCompressionType());
    }
  }

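  /**
   * Verifies that the configured encryption cipher is available and that any supplied encryption
   * key can be used.
   */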
  public static void checkEncryption(final Configuration conf, final TableDescriptor td)
      throws IOException {
    for (ColumnFamilyDescriptor cfd : td.getColumnFamilies()) {
      EncryptionTest.testEncryption(conf, cfd.getEncryptionType(), cfd.getEncryptionKey());
    }
  }

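  /**
   * Verifies that the configured region split policy and any table coprocessors can be loaded.
   */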
  public static void checkClassLoading(final Configuration conf, final TableDescriptor td)
      throws IOException {
    RegionSplitPolicy.getSplitPolicyClass(td, conf);
    RegionCoprocessorHost.testTableCoprocessorAttrs(conf, td);
  }

  // HBASE-13350 - Helper method that logs a warning instead of throwing when sanity checks are
  // disabled; otherwise it throws a DoNotRetryIOException carrying the given cause.
  private static void warnOrThrowExceptionForFailure(boolean logWarn, String message,
      Exception cause) throws IOException {
    if (!logWarn) {
      throw new DoNotRetryIOException(message + " Set " + TABLE_SANITY_CHECKS +
          " to false in the configuration or the table descriptor if you want to bypass sanity " +
          "checks", cause);
    }
    LOG.warn(message);
  }
}