/**
 *
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.client;

import java.util.Arrays;
import java.util.Collection;
import java.util.Comparator;
import java.util.Iterator;
import java.util.Map;
import java.util.Optional;
import java.util.Set;
import java.util.stream.Stream;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.yetus.audience.InterfaceAudience;

/**
 * TableDescriptor contains the details about an HBase table, such as the descriptors of all
 * the column families, whether the table is a catalog table (<code>hbase:meta</code>), whether
 * the table is read only, the maximum size of the memstore, when a region split should occur,
 * and the coprocessors associated with it.
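 * <p>
 * Descriptors are typically obtained from an existing table (for example via
 * {@code Admin#getDescriptor}) or built with {@link TableDescriptorBuilder}. A minimal sketch,
 * where the table and column family names are placeholders:
 * </p>
 * <pre>
 * TableDescriptor td = TableDescriptorBuilder.newBuilder(TableName.valueOf("example_table"))
 *   .setColumnFamily(ColumnFamilyDescriptorBuilder.of("cf"))
 *   .build();
 * </pre>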
 */
@InterfaceAudience.Public
public interface TableDescriptor {

  @InterfaceAudience.Private
  Comparator<TableDescriptor> COMPARATOR = getComparator(ColumnFamilyDescriptor.COMPARATOR);

  @InterfaceAudience.Private
  Comparator<TableDescriptor> COMPARATOR_IGNORE_REPLICATION =
      getComparator(ColumnFamilyDescriptor.COMPARATOR_IGNORE_REPLICATION);

  static Comparator<TableDescriptor>
      getComparator(Comparator<ColumnFamilyDescriptor> cfComparator) {
    return (TableDescriptor lhs, TableDescriptor rhs) -> {
      int result = lhs.getTableName().compareTo(rhs.getTableName());
      if (result != 0) {
        return result;
      }
      Collection<ColumnFamilyDescriptor> lhsFamilies = Arrays.asList(lhs.getColumnFamilies());
      Collection<ColumnFamilyDescriptor> rhsFamilies = Arrays.asList(rhs.getColumnFamilies());
      result = Integer.compare(lhsFamilies.size(), rhsFamilies.size());
      if (result != 0) {
        return result;
      }
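      // family counts are equal at this point; compare the families pairwise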
      for (Iterator<ColumnFamilyDescriptor> it = lhsFamilies.iterator(), it2 =
          rhsFamilies.iterator(); it.hasNext();) {
        result = cfComparator.compare(it.next(), it2.next());
        if (result != 0) {
          return result;
        }
      }
      // punt on comparison for ordering, just calculate difference
      return Integer.compare(lhs.getValues().hashCode(), rhs.getValues().hashCode());
    };
  }

  /**
   * Returns the count of the column families of the table.
   *
   * @return Count of column families of the table
   */
  int getColumnFamilyCount();

  /**
   * Returns the coprocessor descriptors attached to the table.
   *
   * @return The list of CoprocessorDescriptor
   */
  Collection<CoprocessorDescriptor> getCoprocessorDescriptors();

  /**
   * Returns the durability setting for the table.
   *
   * @return durability setting for the table.
   */
  Durability getDurability();

  /**
   * Returns the {@link ColumnFamilyDescriptor}s of all the column families of the table.
   *
   * @return An array of {@link ColumnFamilyDescriptor} of all the column
   * families.
   */
  ColumnFamilyDescriptor[] getColumnFamilies();

  /**
   * Returns all the column family names of the table. The descriptor maintains a map from
   * family name to {@link ColumnFamilyDescriptor}; this method returns the keys of that map,
   * which are the column family names of the table.
   *
   * @return An immutable sorted set of the column family names.
   */
  Set<byte[]> getColumnFamilyNames();

  /**
   * Returns the {@link ColumnFamilyDescriptor} for the column family with the given name.
   *
   * @param name Column family name
   * @return The column family descriptor for the given family name.
   */
  ColumnFamilyDescriptor getColumnFamily(final byte[] name);

  /**
   * This gets the class associated with the flush policy which determines which stores need to
   * be flushed when flushing a region. The class used by default is defined in
   * org.apache.hadoop.hbase.regionserver.FlushPolicy.
   *
   * @return the class name of the flush policy for this table. If this returns
   * null, the default flush policy is used.
   */
  String getFlushPolicyClassName();

  /**
   * Returns the maximum size up to which a region can grow, after which a
   * region split is triggered. The region size is represented by the size of
   * the biggest store file in that region.
   *
   * @return max hregion size for table, -1 if not set.
   */
  long getMaxFileSize();

  /**
   * Returns the size of the memstore after which a flush to the filesystem is
   * triggered.
   *
   * @return memory cache flush size for each hregion, -1 if not set.
   */
  long getMemStoreFlushSize();

  // TODO: Currently this is used for RPC scheduling only. Make it more generic than this; allow
  // it to also be the priority when scheduling procedures that pertain to this table, scheduling
  // first those tables with the highest priority (from Yi Liang over on HBASE-18109).
  int getPriority();

  /**
   * @return the configured number of replicas per region
   */
  int getRegionReplication();

  /**
   * This gets the class associated with the region split policy which
   * determines when a region split should occur. The class used by default is
   * defined in org.apache.hadoop.hbase.regionserver.RegionSplitPolicy.
   *
   * @return the class name of the region split policy for this table. If this
   * returns null, the default split policy is used.
   */
  String getRegionSplitPolicyClassName();

  /**
   * Get the name of the table.
   *
   * @return TableName
   */
  TableName getTableName();

  /**
   * @deprecated since 2.0.0 and will be removed in 3.0.0.
   * @see <a href="https://issues.apache.org/jira/browse/HBASE-15583">HBASE-15583</a>
   */
  @Deprecated
  String getOwnerString();

  /**
   * Get the region server group this table belongs to. The regions of this table will be placed
   * only on the region servers within this group. If not present, the regions will be placed on
   * {@link org.apache.hadoop.hbase.rsgroup.RSGroupInfo#DEFAULT_GROUP}.
   */
  Optional<String> getRegionServerGroup();

  /**
   * Getter for accessing the metadata associated with the key.
   *
   * @param key The key.
   * @return A copy of the value, or null if there is no mapping for the key
   */
  Bytes getValue(Bytes key);

  /**
   * Getter for accessing the metadata associated with the key.
   *
   * @param key The key.
   * @return A copy of the value, or null if there is no mapping for the key
   */
  byte[] getValue(byte[] key);

  /**
   * Getter for accessing the metadata associated with the key.
   *
   * @param key The key.
   * @return The value, or null if there is no mapping for the key
   */
  String getValue(String key);

  /**
   * Getter for fetching an unmodifiable map of the table metadata values.
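   * <p>
   * For example, a single value can be looked up in the returned map (a sketch;
   * {@code tableDescriptor} and the attribute key are placeholders):
   * </p>
   * <pre>
   * Bytes value = tableDescriptor.getValues().get(new Bytes(Bytes.toBytes("SOME_ATTRIBUTE")));
   * </pre>
   *
   * @return An unmodifiable map of the table metadata values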
   */
  Map<Bytes, Bytes> getValues();

  /**
   * Check if the table has an attached coprocessor with the given class name.
   *
   * @param classNameToMatch Class name of the coprocessor
   * @return true if the table has a coprocessor with the given class name
   */
  boolean hasCoprocessor(String classNameToMatch);

  /**
   * Checks to see if this table contains the given column family.
   *
   * @param name Column family name.
   * @return true if the table contains the specified family name
   */
  boolean hasColumnFamily(final byte[] name);

  /**
   * @return true if the read-replicas memstore replication is enabled.
   */
  boolean hasRegionMemStoreReplication();

  /**
   * Check if the compaction enable flag of the table is true. If the flag is false
   * then no minor/major compactions will be performed.
   *
   * @return true if table compaction enabled
   */
  boolean isCompactionEnabled();

  /**
   * Check if the split enable flag of the table is true. If the flag is false
   * then no region split will be done.
   *
   * @return true if table region split enabled
   */
  boolean isSplitEnabled();

  /**
   * Check if the merge enable flag of the table is true. If the flag is false
   * then no region merge will be done.
   *
   * @return true if table region merge enabled
   */
  boolean isMergeEnabled();

  /**
   * Checks if this table is the <code>hbase:meta</code> region.
   *
   * @return true if this table is the <code>hbase:meta</code> region
   */
  boolean isMetaRegion();

  /**
   * Checks if the table is a <code>hbase:meta</code> table.
   *
   * @return true if the table is a <code>hbase:meta</code> table.
   */
  boolean isMetaTable();

  /**
   * Check if the normalization enable flag of the table is true. If the flag is false,
   * the region normalizer will not attempt to normalize this table.
   *
   * @return true if region normalization is enabled for this table
   */
  boolean isNormalizationEnabled();

  /**
   * Get the target region count for region normalization, if set. When set, the normalization
   * plan is calculated based on this target region count.
   *
   * @return the desired region count after normalization
   */
  int getNormalizerTargetRegionCount();

  /**
   * Get the target region size for region normalization, if set. When set, the normalization
   * plan is calculated based on this target region size.
   *
   * @return the desired region size after normalization
   */
  long getNormalizerTargetRegionSize();

  /**
   * Check if the readOnly flag of the table is set. If the readOnly flag is set
   * then the contents of the table can only be read, not modified.
   *
   * @return true if all columns in the table should be read only
   */
  boolean isReadOnly();

  /**
   * @return Name of this table and then a map of all of the column family descriptors (with only
   *         the non-default column family attributes)
   */
  String toStringCustomizedValues();

  /**
   * Check if any of the table's column families have their replication scope set to
   * {@link HConstants#REPLICATION_SCOPE_GLOBAL}.
   * @return {@code true} if at least one column family has global replication scope,
   *         otherwise {@code false}.
   */
  default boolean hasGlobalReplicationScope() {
    return Stream.of(getColumnFamilies())
      .anyMatch(cf -> cf.getScope() == HConstants.REPLICATION_SCOPE_GLOBAL);
  }

  /**
   * Check if the replication scope of the table's column families matches the given replication
   * state: when {@code enabled} is true, every column family must have
   * {@link HConstants#REPLICATION_SCOPE_GLOBAL} scope; when {@code enabled} is false, no column
   * family may have it.
   * @param enabled replication state
   * @return true if matched, otherwise false
   */
  default boolean matchReplicationScope(boolean enabled) {
    boolean hasEnabled = false;
    boolean hasDisabled = false;

    for (ColumnFamilyDescriptor cf : getColumnFamilies()) {
      if (cf.getScope() != HConstants.REPLICATION_SCOPE_GLOBAL) {
        hasDisabled = true;
      } else {
        hasEnabled = true;
      }
    }
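    // a mix of global and non-global scopes can never match a single replication state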
    if (hasEnabled && hasDisabled) {
      return false;
    }
    if (hasEnabled) {
      return enabled;
    }
    return !enabled;
  }
}