/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.client;

import java.nio.ByteBuffer;
import java.util.Arrays;
import java.util.Collection;
import java.util.Comparator;
import java.util.Iterator;
import java.util.Map;
import java.util.Optional;
import java.util.Set;
import java.util.stream.Stream;
import java.util.zip.CRC32;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.yetus.audience.InterfaceAudience;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos;
/**
 * TableDescriptor contains the details about an HBase table, such as the descriptors of all the
 * column families, whether the table is a catalog table (<code>hbase:meta</code>), whether the
 * table is read only, the maximum size of the memstore, when a region split should occur, the
 * coprocessors associated with it, and so on.
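 * <p>
 * A minimal usage sketch, assuming the companion {@code TableDescriptorBuilder} and
 * {@code ColumnFamilyDescriptorBuilder} APIs (the table and family names are illustrative):
 *
 * <pre>{@code
 * TableDescriptor td = TableDescriptorBuilder.newBuilder(TableName.valueOf("example_table"))
 *   .setColumnFamily(ColumnFamilyDescriptorBuilder.of("cf")).build();
 * // Inspect the immutable descriptor.
 * for (ColumnFamilyDescriptor cfd : td.getColumnFamilies()) {
 *   // "cf" is the only family here
 * }
 * }</pre>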
 */
@InterfaceAudience.Public
public interface TableDescriptor {

  Logger LOG = LoggerFactory.getLogger(TableDescriptor.class);

  @InterfaceAudience.Private
  Comparator<TableDescriptor> COMPARATOR = getComparator(ColumnFamilyDescriptor.COMPARATOR);

  @InterfaceAudience.Private
  Comparator<TableDescriptor> COMPARATOR_IGNORE_REPLICATION =
    getComparator(ColumnFamilyDescriptor.COMPARATOR_IGNORE_REPLICATION);

  static Comparator<TableDescriptor>
    getComparator(Comparator<ColumnFamilyDescriptor> cfComparator) {
    return (TableDescriptor lhs, TableDescriptor rhs) -> {
      int result = lhs.getTableName().compareTo(rhs.getTableName());
      if (result != 0) {
        return result;
      }
      Collection<ColumnFamilyDescriptor> lhsFamilies = Arrays.asList(lhs.getColumnFamilies());
      Collection<ColumnFamilyDescriptor> rhsFamilies = Arrays.asList(rhs.getColumnFamilies());
      result = Integer.compare(lhsFamilies.size(), rhsFamilies.size());
      if (result != 0) {
        return result;
      }

      for (Iterator<ColumnFamilyDescriptor> it = lhsFamilies.iterator(),
          it2 = rhsFamilies.iterator(); it.hasNext();) {
        result = cfComparator.compare(it.next(), it2.next());
        if (result != 0) {
          return result;
        }
      }
      // punt on comparison for ordering, just calculate difference
      return Integer.compare(lhs.getValues().hashCode(), rhs.getValues().hashCode());
    };
  }

  /**
   * Returns the count of the column families of the table.
   * @return Count of column families of the table
   */
  int getColumnFamilyCount();

  /**
   * Return the collection of attached coprocessors.
   * @return The collection of CoprocessorDescriptor
   */
  Collection<CoprocessorDescriptor> getCoprocessorDescriptors();

  /**
   * Returns the durability setting for the table.
   * @return durability setting for the table.
   */
  Durability getDurability();

  /**
   * Returns an array of the {@link ColumnFamilyDescriptor}s for all the column families of the
   * table.
   * @return An array of {@link ColumnFamilyDescriptor} of all the column families.
   */
  ColumnFamilyDescriptor[] getColumnFamilies();

  /**
   * Returns all the column family names of the current table. The map of TableDescriptor
   * contains a mapping of family name to ColumnFamilyDescriptor. This returns all the keys of
   * the family map, which represent the column family names of the table.
   * @return Immutable sorted set of the keys of the families.
   */
  Set<byte[]> getColumnFamilyNames();

  /**
   * Returns the ColumnFamilyDescriptor for the column family with the name specified by the
   * parameter.
   * @param name Column family name
   * @return The column descriptor for the passed family name.
   */
  ColumnFamilyDescriptor getColumnFamily(final byte[] name);

  /**
   * This gets the class associated with the flush policy, which determines which stores need to
   * be flushed when flushing a region. The class used by default is defined in
   * org.apache.hadoop.hbase.regionserver.FlushPolicy.
   * @return the class name of the flush policy for this table. If this returns null, the default
   *         flush policy is used.
   */
  String getFlushPolicyClassName();

  /**
   * Returns the maximum size a region can grow to before a region split is triggered. The region
   * size is represented by the size of the biggest store file in that region.
   * @return max hregion size for table, -1 if not set.
   */
  long getMaxFileSize();

  /**
   * Returns the size of the memstore after which a flush to filesystem is triggered.
   * @return memory cache flush size for each hregion, -1 if not set.
   */
  long getMemStoreFlushSize();

  // TODO: Currently this is used for RPC scheduling only. Make it more generic than this; allow
  // it to also be the priority when scheduling procedures that pertain to this table, scheduling
  // first those tables with the highest priority (from Yi Liang over on HBASE-18109).
  int getPriority();

  /** Returns the configured replicas per region */
  int getRegionReplication();

  /**
   * This gets the class associated with the region split policy, which determines when a region
   * split should occur. The class used by default is defined in
   * org.apache.hadoop.hbase.regionserver.RegionSplitPolicy.
   * @return the class name of the region split policy for this table. If this returns null, the
   *         default split policy is used.
   */
  String getRegionSplitPolicyClassName();

  /**
   * Get the name of the table
   */
  TableName getTableName();

  /**
   * Get the region server group this table belongs to. The regions of this table will be placed
   * only on the region servers within this group. If not present, the regions will be placed on
   * {@link org.apache.hadoop.hbase.rsgroup.RSGroupInfo#DEFAULT_GROUP}.
   */
  Optional<String> getRegionServerGroup();

  /**
   * Getter for accessing the metadata associated with the key.
   * @param key The key.
   * @return A copy of the value, or null if there is no mapping for the key
   */
  Bytes getValue(Bytes key);

  /**
   * Getter for accessing the metadata associated with the key.
   * @param key The key.
   * @return A copy of the value, or null if there is no mapping for the key
   */
  byte[] getValue(byte[] key);

  /**
   * Getter for accessing the metadata associated with the key.
   * @param key The key.
   * @return The value, or null if there is no mapping for the key
   */
  String getValue(String key);

  /** Returns an unmodifiable map of the table metadata values. */
  Map<Bytes, Bytes> getValues();

  /**
   * Check if the table has an attached co-processor represented by the name className
   * @param classNameToMatch - Class name of the co-processor
   * @return true if the table has a co-processor className
   */
  boolean hasCoprocessor(String classNameToMatch);

  /**
   * Checks to see if this table contains the given column family
   * @param name Family name or column name.
   * @return true if the table contains the specified family name
   */
  boolean hasColumnFamily(final byte[] name);

  /** Returns true if the read-replicas memstore replication is enabled. */
  boolean hasRegionMemStoreReplication();

  /**
   * Check if the compaction enable flag of the table is true. If the flag is false then no
   * minor/major compactions will actually be performed.
   * @return true if table compaction is enabled
   */
  boolean isCompactionEnabled();

  /**
   * Check if the split enable flag of the table is true. If the flag is false then no region
   * split will be done.
   * @return true if table region split is enabled
   */
  boolean isSplitEnabled();

  /**
   * Check if the merge enable flag of the table is true. If the flag is false then no region
   * merge will be done.
   * @return true if table region merge is enabled
   */
  boolean isMergeEnabled();

  /**
   * Checks if this table is the <code>hbase:meta</code> region.
   * @return true if this table is the <code>hbase:meta</code> region
   */
  boolean isMetaRegion();

  /**
   * Checks if the table is the <code>hbase:meta</code> table.
   * @return true if the table is the <code>hbase:meta</code> table.
   */
  boolean isMetaTable();

  /**
   * Check if the normalization enable flag of the table is true. If the flag is false then the
   * region normalizer won't attempt to normalize this table.
   * @return true if region normalization is enabled for this table
   */
  boolean isNormalizationEnabled();

  /**
   * Check if a target region count is set. If so, the normalization plan will be calculated
   * based on the target region count.
   * @return the target region count after normalization is done
   */
  int getNormalizerTargetRegionCount();

  /**
   * Check if a target region size is set. If so, the normalization plan will be calculated based
   * on the target region size.
   * @return the target region size after normalization is done
   */
  long getNormalizerTargetRegionSize();

  /**
   * Check if the readOnly flag of the table is set. If the readOnly flag is set then the
   * contents of the table can only be read, not modified.
   * @return true if all columns in the table should be read only
   */
  boolean isReadOnly();

  /**
   * The HDFS erasure coding policy for a table. This will be set on the data dir of the table,
   * and is an alternative to normal replication that takes less space at the cost of locality.
   * @return the current policy, or null if undefined
   */
  default String getErasureCodingPolicy() {
    return null;
  }

  /**
   * Returns the name of this table followed by a map of all of the column family descriptors
   * (with only the non-default column family attributes).
   */
  String toStringCustomizedValues();

  /**
   * Check if any of the table's column families have their replication scope set to
   * {@link HConstants#REPLICATION_SCOPE_GLOBAL}.
   * @return {@code true} if at least one does, otherwise {@code false}.
   */
  default boolean hasGlobalReplicationScope() {
    return Stream.of(getColumnFamilies())
      .anyMatch(cf -> cf.getScope() == HConstants.REPLICATION_SCOPE_GLOBAL);
  }

  /**
   * Check whether the replication scope of all of the table's column families matches the given
   * replication state. Returns {@code false} when the families mix global and non-global scopes.
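   * <p>
   * An illustrative sketch ({@code td} is a hypothetical descriptor):
   *
   * <pre>{@code
   * // true only if every column family has scope REPLICATION_SCOPE_GLOBAL
   * boolean fullyReplicated = td.matchReplicationScope(true);
   * // true only if no column family has scope REPLICATION_SCOPE_GLOBAL
   * boolean notReplicated = td.matchReplicationScope(false);
   * // a table mixing global and non-global scopes returns false for both calls
   * }</pre>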
   * @param enabled replication state
   * @return true if matched, otherwise false
   */
  default boolean matchReplicationScope(boolean enabled) {
    boolean hasEnabled = false;
    boolean hasDisabled = false;

    for (ColumnFamilyDescriptor cf : getColumnFamilies()) {
      if (cf.getScope() != HConstants.REPLICATION_SCOPE_GLOBAL) {
        hasDisabled = true;
      } else {
        hasEnabled = true;
      }
    }

    if (hasEnabled && hasDisabled) {
      return false;
    }
    if (hasEnabled) {
      return enabled;
    }
    return !enabled;
  }

  /**
   * Computes a CRC32 hash of the table descriptor's protobuf representation. This hash can be
   * used to detect changes in the table descriptor configuration.
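   * <p>
   * An illustrative sketch, assuming an {@code Admin} handle ({@code admin}) is available for
   * re-fetching the descriptor:
   *
   * <pre>{@code
   * String before = descriptor.getDescriptorHash();
   * TableDescriptor refreshed = admin.getDescriptor(descriptor.getTableName());
   * // differing hashes indicate the descriptor configuration changed
   * boolean changed = !before.equals(refreshed.getDescriptorHash());
   * }</pre>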
   * @return A hex string representation of the CRC32 hash, or "UNKNOWN" if computation fails
   */
  default String getDescriptorHash() {
    try {
      HBaseProtos.TableSchema tableSchema = ProtobufUtil.toTableSchema(this);
      ByteBuffer byteBuffer = ByteBuffer.wrap(tableSchema.toByteArray());
      CRC32 crc32 = new CRC32();
      crc32.update(byteBuffer);
      return Long.toHexString(crc32.getValue());
    } catch (Exception e) {
      LOG.error("Failed to compute table descriptor hash for table {}", getTableName(), e);
      return "UNKNOWN";
    }
  }
}