/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.regionserver;

import java.util.concurrent.locks.Lock;
import java.util.concurrent.locks.ReadWriteLock;
import java.util.concurrent.locks.ReentrantLock;
import java.util.concurrent.locks.ReentrantReadWriteLock;
import org.apache.hadoop.conf.Configuration;
import org.apache.yetus.audience.InterfaceAudience;

/**
 * Lock to manage concurrency between {@link RegionScanner} and
 * {@link HRegion#getSmallestReadPoint()}. We need to ensure that while we are calculating the
 * smallest read point, no new scanners can modify the scannerReadPoints Map. We used to achieve
 * this by synchronizing on the scannerReadPoints object, but that can block read threads and
 * reduce read performance. Since the scannerReadPoints object is a
 * {@link java.util.concurrent.ConcurrentHashMap}, which is thread-safe, {@link RegionScanner}s
 * can record their read points concurrently; all they need is a shared lock. Only when we
 * calculate the smallest read point do we need to acquire an exclusive lock. This improves read
 * performance in most scenarios, except when there are many delta operations, such as
 * {@link org.apache.hadoop.hbase.client.Append} or
 * {@link org.apache.hadoop.hbase.client.Increment}, so we introduce a flag to enable/disable
 * this feature.
 */
@InterfaceAudience.Private
public class ReadPointCalculationLock {

  public enum LockType {
    CALCULATION_LOCK,
    RECORDING_LOCK
  }

  private final boolean useReadWriteLockForReadPoints;
  private Lock lock;
  private ReadWriteLock readWriteLock;

  ReadPointCalculationLock(Configuration conf) {
    this.useReadWriteLockForReadPoints =
      conf.getBoolean("hbase.region.readpoints.read.write.lock.enable", false);
    if (useReadWriteLockForReadPoints) {
      readWriteLock = new ReentrantReadWriteLock();
    } else {
      lock = new ReentrantLock();
    }
  }

  void lock(LockType lockType) {
    if (useReadWriteLockForReadPoints) {
      assert lock == null;
      if (lockType == LockType.CALCULATION_LOCK) {
        // Exclusive lock: blocks all read-point recording while the calculation runs.
        readWriteLock.writeLock().lock();
      } else {
        // Shared lock: many scanners may record their read points concurrently.
        readWriteLock.readLock().lock();
      }
    } else {
      assert readWriteLock == null;
      lock.lock();
    }
  }

  void unlock(LockType lockType) {
    if (useReadWriteLockForReadPoints) {
      assert lock == null;
      if (lockType == LockType.CALCULATION_LOCK) {
        readWriteLock.writeLock().unlock();
      } else {
        readWriteLock.readLock().unlock();
      }
    } else {
      assert readWriteLock == null;
      lock.unlock();
    }
  }
}
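
/*
 * Illustrative usage sketch (not part of the upstream class): shows how a scanner-style
 * reader and a smallest-read-point calculation might coordinate through
 * ReadPointCalculationLock, assuming the read/write-lock mode is enabled. The
 * scannerReadPoints map, the scanner id, and the read-point values below are hypothetical
 * stand-ins for HRegion's internal bookkeeping, not the real HRegion code.
 */
final class ReadPointCalculationLockUsageExample {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    // Opt in to the read/write-lock mode so concurrent readers only take the shared lock.
    conf.setBoolean("hbase.region.readpoints.read.write.lock.enable", true);
    ReadPointCalculationLock rpLock = new ReadPointCalculationLock(conf);

    // Hypothetical stand-in for HRegion's scannerReadPoints map (scanner id -> read point).
    java.util.concurrent.ConcurrentHashMap<Long, Long> scannerReadPoints =
      new java.util.concurrent.ConcurrentHashMap<>();

    // A scanner records its read point under the shared RECORDING_LOCK; many scanners may
    // do this at once because the underlying map is itself thread-safe.
    rpLock.lock(ReadPointCalculationLock.LockType.RECORDING_LOCK);
    try {
      scannerReadPoints.put(1L, 42L);
    } finally {
      rpLock.unlock(ReadPointCalculationLock.LockType.RECORDING_LOCK);
    }

    // Calculating the smallest read point takes the exclusive CALCULATION_LOCK so no new
    // scanner can register a read point while the minimum is being computed.
    rpLock.lock(ReadPointCalculationLock.LockType.CALCULATION_LOCK);
    try {
      long smallest = scannerReadPoints.values().stream().mapToLong(Long::longValue).min()
        .orElse(Long.MAX_VALUE);
      System.out.println("smallest read point: " + smallest);
    } finally {
      rpLock.unlock(ReadPointCalculationLock.LockType.CALCULATION_LOCK);
    }
  }
}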