/**
 *
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.client;

import java.util.Arrays;
import java.util.Collection;
import java.util.Comparator;
import java.util.Iterator;
import java.util.Map;
import java.util.Set;
import java.util.stream.Collectors;
import java.util.stream.Stream;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.yetus.audience.InterfaceAudience;

/**
 * TableDescriptor contains the details about an HBase table, such as the descriptors of
 * all the column families, whether the table is a catalog table (<code>hbase:meta</code>),
 * whether the table is read only, the maximum size of the memstore,
 * when a region split should occur, the coprocessors associated with the table, etc.
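 * <p>
 * A minimal construction sketch (assuming the companion {@code TableDescriptorBuilder} and
 * {@code ColumnFamilyDescriptorBuilder} classes from this package; the table and family names
 * below are only illustrative):
 * <pre>{@code
 * TableDescriptor desc = TableDescriptorBuilder
 *   .newBuilder(TableName.valueOf("example_table"))
 *   .setColumnFamily(ColumnFamilyDescriptorBuilder.of("cf"))
 *   .build();
 * }</pre>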
 */
@InterfaceAudience.Public
public interface TableDescriptor {

  @InterfaceAudience.Private
  Comparator<TableDescriptor> COMPARATOR = getComparator(ColumnFamilyDescriptor.COMPARATOR);

  @InterfaceAudience.Private
  Comparator<TableDescriptor> COMPARATOR_IGNORE_REPLICATION =
      getComparator(ColumnFamilyDescriptor.COMPARATOR_IGNORE_REPLICATION);

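  /**
   * Returns a comparator that orders table descriptors by table name, then by column families
   * (compared with the given column family comparator), and finally, as a tie-breaker, by the
   * hash code of their values map.
   * <p>
   * A minimal usage sketch (assuming an existing {@code Admin} instance named {@code admin}):
   * <pre>{@code
   * List<TableDescriptor> descriptors = new ArrayList<>(admin.listTableDescriptors());
   * descriptors.sort(TableDescriptor.COMPARATOR);
   * }</pre>
   */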
  static Comparator<TableDescriptor>
      getComparator(Comparator<ColumnFamilyDescriptor> cfComparator) {
    return (TableDescriptor lhs, TableDescriptor rhs) -> {
      int result = lhs.getTableName().compareTo(rhs.getTableName());
      if (result != 0) {
        return result;
      }
      Collection<ColumnFamilyDescriptor> lhsFamilies = Arrays.asList(lhs.getColumnFamilies());
      Collection<ColumnFamilyDescriptor> rhsFamilies = Arrays.asList(rhs.getColumnFamilies());
      result = Integer.compare(lhsFamilies.size(), rhsFamilies.size());
      if (result != 0) {
        return result;
      }

      for (Iterator<ColumnFamilyDescriptor> it = lhsFamilies.iterator(), it2 =
          rhsFamilies.iterator(); it.hasNext();) {
        result = cfComparator.compare(it.next(), it2.next());
        if (result != 0) {
          return result;
        }
      }
      // punt on comparison for ordering, just calculate difference
      return Integer.compare(lhs.getValues().hashCode(), rhs.getValues().hashCode());
    };
  }

  /**
   * Returns the count of the column families of the table.
   *
   * @return Count of column families of the table
   */
  int getColumnFamilyCount();

  /**
   * Return the list of attached co-processors.
   *
   * @return The list of CoprocessorDescriptors
   */
  Collection<CoprocessorDescriptor> getCoprocessorDescriptors();

  /**
   * Return the list of attached co-processors represented by their class names.
   *
   * @return The list of co-processor class names
   * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0.
   *             Use {@link #getCoprocessorDescriptors()} instead.
   */
  @Deprecated
  default Collection<String> getCoprocessors() {
    return getCoprocessorDescriptors().stream()
      .map(CoprocessorDescriptor::getClassName)
      .collect(Collectors.toList());
  }

  /**
   * Returns the durability setting for the table.
   *
   * @return durability setting for the table.
   */
  Durability getDurability();

  /**
   * Returns all the {@link ColumnFamilyDescriptor}s of the column families of the table.
   *
   * @return An array of {@link ColumnFamilyDescriptor}s, one for each column
   * family of the table.
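   * <p>
   * A minimal iteration sketch (assuming {@code desc} is an existing {@code TableDescriptor}):
   * <pre>{@code
   * for (ColumnFamilyDescriptor cf : desc.getColumnFamilies()) {
   *   System.out.println(Bytes.toString(cf.getName()));
   * }
   * }</pre>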
   */
  ColumnFamilyDescriptor[] getColumnFamilies();

  /**
   * Returns all the column family names of the current table. The TableDescriptor
   * holds a mapping of family name to ColumnFamilyDescriptor; this returns the keys
   * of that map, which are the column family names of the table.
   *
   * @return Immutable sorted set of the family names.
   */
  Set<byte[]> getColumnFamilyNames();

  /**
   * Returns the ColumnFamilyDescriptor for the column family with the given name.
   *
   * @param name Column family name
   * @return Column descriptor for the passed family name, or null if the table has
   * no such column family.
   */
  ColumnFamilyDescriptor getColumnFamily(final byte[] name);

  /**
   * This gets the class associated with the flush policy, which determines which
   * stores need to be flushed when flushing a region. The class used by default
   * is defined in org.apache.hadoop.hbase.regionserver.FlushPolicy.
   *
   * @return the class name of the flush policy for this table. If this returns
   * null, the default flush policy is used.
   */
  String getFlushPolicyClassName();

  /**
   * Returns the maximum size up to which a region can grow, after which a
   * region split is triggered. The region size is represented by the size of
   * the biggest store file in that region.
   *
   * @return max hregion size for table, -1 if not set.
   */
  long getMaxFileSize();

  /**
   * Returns the size of the memstore after which a flush to the filesystem is
   * triggered.
   *
   * @return memory cache flush size for each hregion, -1 if not set.
   */
  long getMemStoreFlushSize();

  // TODO: Currently this is used for RPC scheduling only. Make it more generic than this; allow
  // it to also be the priority when scheduling procedures that pertain to this table, scheduling
  // first those tables with the highest priority (From Yi Liang over on HBASE-18109).
  int getPriority();

  /**
   * @return the configured number of replicas per region
   */
  int getRegionReplication();

  /**
   * This gets the class associated with the region split policy, which
   * determines when a region split should occur. The class used by default is
   * defined in org.apache.hadoop.hbase.regionserver.RegionSplitPolicy.
   *
   * @return the class name of the region split policy for this table. If this
   * returns null, the default split policy is used.
   */
  String getRegionSplitPolicyClassName();

  /**
   * Get the name of the table.
   *
   * @return TableName
   */
  TableName getTableName();

  /**
   * @deprecated since 2.0.0 and will be removed in 3.0.0.
   * @see <a href="https://issues.apache.org/jira/browse/HBASE-15583">HBASE-15583</a>
   */
  @Deprecated
  String getOwnerString();

  /**
   * Getter for accessing the metadata associated with the key.
   *
   * @param key The key.
   * @return A copy of the value, or null if no mapping for the key
   */
  Bytes getValue(Bytes key);

  /**
   * Getter for accessing the metadata associated with the key.
   *
   * @param key The key.
   * @return A copy of the value, or null if no mapping for the key
   */
  byte[] getValue(byte[] key);

  /**
   * Getter for accessing the metadata associated with the key.
   *
   * @param key The key.
   * @return The value, or null if no mapping for the key
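   * <p>
   * A minimal lookup sketch (assuming {@code desc} is an existing {@code TableDescriptor};
   * the {@code "MAX_FILESIZE"} key is only an illustration of a table attribute name):
   * <pre>{@code
   * String maxFileSize = desc.getValue("MAX_FILESIZE");
   * }</pre>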
   */
  String getValue(String key);

  /**
   * @return An unmodifiable map of the table metadata values.
   */
  Map<Bytes, Bytes> getValues();

  /**
   * Check if the table has a co-processor attached with the given class name.
   *
   * @param classNameToMatch - Class name of the co-processor
   * @return true if the table has a co-processor with the given class name
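   * <p>
   * A minimal check sketch (assuming {@code desc} is an existing {@code TableDescriptor};
   * the coprocessor class name below is only illustrative):
   * <pre>{@code
   * boolean hasAccessControl =
   *   desc.hasCoprocessor("org.apache.hadoop.hbase.security.access.AccessController");
   * }</pre>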
   */
  boolean hasCoprocessor(String classNameToMatch);

  /**
   * Checks to see if this table contains the given column family.
   *
   * @param name Column family name.
   * @return true if the table contains the specified family name
   */
  boolean hasColumnFamily(final byte[] name);

  /**
   * @return true if the read-replicas memstore replication is enabled.
   */
  boolean hasRegionMemStoreReplication();

  /**
   * Check if the compaction enable flag of the table is true. If the flag is false
   * then no minor/major compactions will be performed.
   *
   * @return true if compaction is enabled for the table
   */
  boolean isCompactionEnabled();

  /**
   * Check if the split enable flag of the table is true. If the flag is false
   * then no region split will be done.
   *
   * @return true if region split is enabled for the table
   */
  boolean isSplitEnabled();

  /**
   * Check if the merge enable flag of the table is true. If the flag is false
   * then no region merge will be done.
   *
   * @return true if region merge is enabled for the table
   */
  boolean isMergeEnabled();

  /**
   * Checks if this table is the <code>hbase:meta</code> region.
   *
   * @return true if this table is the <code>hbase:meta</code> region
   */
  boolean isMetaRegion();

  /**
   * Checks if the table is a <code>hbase:meta</code> table.
   *
   * @return true if the table is a <code>hbase:meta</code> table.
   */
  boolean isMetaTable();

  /**
   * Check if the normalization enable flag of the table is true. If the flag is false
   * then the region normalizer won't attempt to normalize this table.
   *
   * @return true if region normalization is enabled for this table
   */
  boolean isNormalizationEnabled();

  /**
   * Get the target region count for normalization, if one is set. If so, the
   * normalization plan will be calculated based on this target region count.
   *
   * @return target region count after normalization is done
   */
  int getNormalizerTargetRegionCount();

  /**
   * Get the target region size for normalization, if one is set. If so, the
   * normalization plan will be calculated based on this target region size.
   *
   * @return target region size after normalization is done
   */
  long getNormalizerTargetRegionSize();

  /**
   * Check if the readOnly flag of the table is set. If the readOnly flag is set
   * then the contents of the table can only be read from, not modified.
   *
   * @return true if all columns in the table should be read only
   */
  boolean isReadOnly();

  /**
   * @return Name of this table and then a map of all of the column family descriptors (with only
   *         the non-default column family attributes)
   */
  String toStringCustomizedValues();

  /**
   * Check if any of the table's column families' replication scopes are set to
   * {@link HConstants#REPLICATION_SCOPE_GLOBAL}.
   * @return {@code true} if any are, otherwise {@code false}.
   */
  default boolean hasGlobalReplicationScope() {
    return Stream.of(getColumnFamilies())
      .anyMatch(cf -> cf.getScope() == HConstants.REPLICATION_SCOPE_GLOBAL);
  }

  /**
   * Check if the replication scope of all of the table's column families matches the given
   * replication state: enabled means every family has a global replication scope, disabled
   * means none of them does.
   * @param enabled replication state
   * @return true if matched, otherwise false
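   * <p>
   * A minimal sketch (assuming {@code desc} is an existing {@code TableDescriptor}):
   * <pre>{@code
   * // true only when every column family has REPLICATION_SCOPE_GLOBAL
   * boolean fullyReplicated = desc.matchReplicationScope(true);
   * }</pre>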
   */
  default boolean matchReplicationScope(boolean enabled) {
    boolean hasEnabled = false;
    boolean hasDisabled = false;

    // Record whether any column family is globally replicated and whether any is not.
    for (ColumnFamilyDescriptor cf : getColumnFamilies()) {
      if (cf.getScope() != HConstants.REPLICATION_SCOPE_GLOBAL) {
        hasDisabled = true;
      } else {
        hasEnabled = true;
      }
    }

    // A mix of scopes never matches a single replication state.
    if (hasEnabled && hasDisabled) {
      return false;
    }
    if (hasEnabled) {
      return enabled;
    }
    return !enabled;
  }
}