/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.hadoop.hbase.procedure2.util;

import java.io.IOException;
import java.io.OutputStream;
import java.util.Arrays;

import org.apache.hadoop.hbase.classification.InterfaceAudience;
import org.apache.hadoop.hbase.classification.InterfaceStability;

/**
 * Similar to a ByteArrayOutputStream, with the difference that a "head" position can be marked
 * once some data has been written. When the slot is flushed with writeTo(), the bytes written
 * after the head are emitted first, followed by the bytes written before it. This makes it
 * possible to compute a header (e.g. the length or checksum of the payload) after the payload
 * has been written, while still serializing the header in front of the payload.
 * An illustrative usage:
 * <pre>
 *   ByteSlot slot = new ByteSlot();
 *   slot.write(payload);     // write the data first
 *   slot.markHead();         // everything written from here on becomes the header
 *   slot.write(header);      // e.g. the encoded payload length
 *   slot.writeTo(stream);    // emits [header, payload]
 * </pre>
 */
@InterfaceAudience.Private
@InterfaceStability.Evolving
public class ByteSlot extends OutputStream {
  // Double the buffer while the doubled size stays below 1MB; above that, grow to the exact size.
  private static final int DOUBLE_GROW_LIMIT = 1 << 20;
  // Requested capacities are rounded up to a multiple of this alignment.
  private static final int GROW_ALIGN = 128;

  private byte[] buf;   // lazily allocated backing buffer
  private int head;     // offset of the marked head (start of the portion emitted first)
  private int size;     // number of bytes written so far

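  /**
   * Resets the slot so it can be reused for a new payload. The backing buffer is retained;
   * only the head and size counters are cleared.
   */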
  public void reset() {
    head = 0;
    size = 0;
  }

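  /**
   * Marks the current write position as the head. Everything written after this call is
   * treated as the header and is emitted first by writeTo(), ahead of the bytes written
   * before the mark.
   */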
  public void markHead() {
    head = size;
  }

  public int getHead() {
    return head;
  }

  public int size() {
    return size;
  }

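  /**
   * Returns the raw internal buffer. Only the first size() bytes are valid, and the head
   * portion is not reordered; use writeTo() to serialize the slot as [header, payload].
   */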
  public byte[] getBuffer() {
    return buf;
  }

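  /**
   * Overwrites a single byte at an absolute offset. The head is pulled back to that offset
   * if necessary, so the patched byte is part of the portion emitted first by writeTo().
   */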
  public void writeAt(int offset, int b) {
    head = Math.min(head, offset);
    buf[offset] = (byte)b;
  }

  public void write(int b) {
    ensureCapacity(size + 1);
    buf[size++] = (byte)b;
  }

  public void write(byte[] b, int off, int len) {
    ensureCapacity(size + len);
    System.arraycopy(b, off, buf, size, len);
    size += len;
  }

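  /**
   * Flushes the slot to the given stream: the bytes written after the head go out first,
   * followed by the bytes written before it, so the stream sees [header, payload].
   */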
  public void writeTo(final OutputStream stream) throws IOException {
    if (head != 0) {
      stream.write(buf, head, size - head);
      stream.write(buf, 0, head);
    } else {
      stream.write(buf, 0, size);
    }
  }

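  /**
   * Grows the buffer so that at least minCapacity bytes fit. The requested capacity is
   * rounded up to a multiple of GROW_ALIGN; the buffer is doubled when that is sufficient
   * and stays under DOUBLE_GROW_LIMIT, otherwise it grows to the exact aligned requirement.
   */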
  private void ensureCapacity(int minCapacity) {
    minCapacity = (minCapacity + (GROW_ALIGN - 1)) & -GROW_ALIGN;
    if (buf == null) {
      buf = new byte[minCapacity];
    } else if (minCapacity > buf.length) {
      int newCapacity = buf.length << 1;
      if (minCapacity > newCapacity || newCapacity > DOUBLE_GROW_LIMIT) {
        newCapacity = minCapacity;
      }
      buf = Arrays.copyOf(buf, newCapacity);
    }
  }
}