/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.util;

import java.util.concurrent.locks.ReentrantReadWriteLock;

import org.apache.hadoop.hbase.classification.InterfaceAudience;

import com.google.common.annotations.VisibleForTesting;

/**
 * Allows multiple concurrent clients to lock on a numeric id with a ReentrantReadWriteLock. Locks
 * are kept in a {@link WeakObjectPool} keyed by id, so the lock for an id can be garbage
 * collected once no caller holds a reference to it. Callers obtain a lock via
 * {@link #getLock(long)} and are responsible for acquiring and releasing its read or write side.
 */
@InterfaceAudience.Private
public class IdReadWriteLock {
  /** The number of concurrent locks the pool is sized for; this is not a hard maximum. */
  private static final int NB_CONCURRENT_LOCKS = 1000;

  /**
   * Pool of locks keyed by id. Entries are weakly referenced, so a lock is reclaimed once no
   * caller holds a reference to it.
   */
  private final WeakObjectPool<Long, ReentrantReadWriteLock> lockPool =
      new WeakObjectPool<Long, ReentrantReadWriteLock>(
          new WeakObjectPool.ObjectFactory<Long, ReentrantReadWriteLock>() {
            @Override
            public ReentrantReadWriteLock createObject(Long id) {
              return new ReentrantReadWriteLock();
            }
          }, NB_CONCURRENT_LOCKS);

  /**
   * Get the ReentrantReadWriteLock corresponding to the given id
   * @param id an arbitrary number to identify the lock
   */
  public ReentrantReadWriteLock getLock(long id) {
    lockPool.purge();
    ReentrantReadWriteLock readWriteLock = lockPool.get(id);
    return readWriteLock;
  }
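
  // A minimal usage sketch for getLock (illustrative only; idReadWriteLock and blockId are
  // hypothetical caller-side names, not part of this class). Callers keep a reference to the
  // returned lock and hold it for the duration of their critical section, e.g. for readers:
  //
  //   ReentrantReadWriteLock lock = idReadWriteLock.getLock(blockId);
  //   lock.readLock().lock();
  //   try {
  //     // access the resource identified by blockId
  //   } finally {
  //     lock.readLock().unlock();
  //   }
  //
  // Writers use lock.writeLock() in the same pattern.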

  /** For testing only: force a GC, purge stale pool entries, and return the pool size. */
  @VisibleForTesting
  int purgeAndGetEntryPoolSize() {
    gc();
    Threads.sleep(200);
    lockPool.purge();
    return lockPool.size();
  }

  @edu.umd.cs.findbugs.annotations.SuppressWarnings(value="DM_GC", justification="Intentional")
  private void gc() {
    System.gc();
  }
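
  // Hypothetical test sketch for purgeAndGetEntryPoolSize (idLock and the assertion are
  // illustrative assumptions, not part of this class): obtain locks without retaining references,
  // then verify that the weakly referenced pool entries can be collected and purged.
  //
  //   IdReadWriteLock idLock = new IdReadWriteLock();
  //   for (long i = 0; i < 1000; i++) {
  //     idLock.getLock(i);                            // no reference kept to the returned lock
  //   }
  //   int remaining = idLock.purgeAndGetEntryPoolSize();
  //   org.junit.Assert.assertTrue(remaining < 1000);  // collected entries were purged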

  /** For testing only: block until at least numWaiters threads are queued on the lock for id. */
  @VisibleForTesting
  public void waitForWaiters(long id, int numWaiters) throws InterruptedException {
    for (ReentrantReadWriteLock readWriteLock;;) {
      readWriteLock = lockPool.get(id);
      if (readWriteLock != null) {
        synchronized (readWriteLock) {
          if (readWriteLock.getQueueLength() >= numWaiters) {
            return;
          }
        }
      }
      Thread.sleep(50);
    }
  }
}