/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.constraint;

import org.apache.hadoop.conf.Configurable;
import org.apache.hadoop.hbase.client.Put;
import org.apache.yetus.audience.InterfaceAudience;

/**
 * Apply a {@link Constraint} (in traditional database terminology) to an HTable. Any number of
 * {@link Constraint Constraints} can be added to the table, in any order.
 * <p>
 * A {@link Constraint} must be added to a table before the table is loaded via
 * {@link Constraints#add(org.apache.hadoop.hbase.HTableDescriptor, Class[])} or
 * {@link Constraints#add(org.apache.hadoop.hbase.HTableDescriptor, org.apache.hadoop.hbase.util.Pair...)}
 * (if you want to add a configuration with the {@link Constraint}). Constraints will be run in the
 * order that they are added. Further, a Constraint will be configured before it is run (on load).
 * <p>
 * See {@link Constraints#enableConstraint(org.apache.hadoop.hbase.HTableDescriptor, Class)} and
 * {@link Constraints#disableConstraint(org.apache.hadoop.hbase.HTableDescriptor, Class)} for
 * enabling/disabling a given {@link Constraint} after it has been added.
 * <p>
 * If a {@link Put} is invalid, the Constraint should throw some sort of
 * {@link org.apache.hadoop.hbase.constraint.ConstraintException}, indicating that the {@link Put}
 * has failed. When this exception is thrown, no further retries of the {@link Put} are attempted,
 * nor are any other {@link Constraint Constraints} run (the {@link Put} is clearly not valid).
 * Therefore, there are performance implications in the order in which {@link Constraint
 * Constraints} are specified.
 * <p>
 * If a {@link Constraint} fails to reject the {@link Put} via a
 * {@link org.apache.hadoop.hbase.constraint.ConstraintException}, but instead throws a
 * {@link RuntimeException}, the entire constraint processing mechanism
 * ({@link ConstraintProcessor}) will be unloaded from the table. This ensures that the region
 * server is still functional, but no more {@link Put Puts} will be checked via {@link Constraint
 * Constraints}.
 * <p>
 * Further, {@link Constraint Constraints} should probably not be used to enforce cross-table
 * references, as doing so will cause tremendous write slowdowns, though it is possible.
 * <p>
 * NOTE: Implementing classes must have a nullary (no-args) constructor.
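 * <p>
 * A minimal sketch of one possible implementation and how it might be registered; the class name,
 * the rule it enforces, and the error message below are purely illustrative and are not part of
 * this package:
 *
 * <pre>
 * public class NotEmptyConstraint extends BaseConstraint {
 *   public void check(Put p) throws ConstraintException {
 *     // Hypothetical rule: reject any Put that carries no columns.
 *     if (p.isEmpty()) {
 *       throw new ConstraintException("Put must contain at least one column: " + p);
 *     }
 *   }
 * }
 *
 * // Registered on the table descriptor before the table is loaded, e.g.:
 * // Constraints.add(tableDescriptor, NotEmptyConstraint.class);
 * </pre>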
 * @see BaseConstraint
 * @see Constraints
 */
@InterfaceAudience.Private
public interface Constraint extends Configurable {

  /**
   * Check a {@link Put} to ensure it is valid for the table. If the {@link Put} is valid, then
   * just return from the method. Otherwise, throw an {@link Exception} specifying what happened.
   * This {@link Exception} is propagated back to the client so you can see what caused the
   * {@link Put} to fail.
   * @param p {@link Put} to check
   * @throws org.apache.hadoop.hbase.constraint.ConstraintException when the {@link Put} does not
   *           match the constraint.
   */
  void check(Put p) throws ConstraintException;

}