/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.regionserver.storefiletracker;

import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertNull;
import static org.junit.Assert.assertThrows;
import static org.junit.Assert.assertTrue;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;

import java.io.IOException;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseClassTestRule;
import org.apache.hadoop.hbase.HBaseCommonTestingUtil;
import org.apache.hadoop.hbase.regionserver.HRegionFileSystem;
import org.apache.hadoop.hbase.regionserver.StoreContext;
import org.apache.hadoop.hbase.testclassification.RegionServerTests;
import org.apache.hadoop.hbase.testclassification.SmallTests;
import org.apache.hadoop.hbase.util.Bytes;
import org.junit.AfterClass;
import org.junit.Before;
import org.junit.ClassRule;
import org.junit.Rule;
import org.junit.Test;
import org.junit.experimental.categories.Category;
import org.junit.rules.TestName;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import org.apache.hbase.thirdparty.com.google.common.io.ByteStreams;

import org.apache.hadoop.hbase.shaded.protobuf.generated.StoreFileTrackerProtos.StoreFileEntry;
import org.apache.hadoop.hbase.shaded.protobuf.generated.StoreFileTrackerProtos.StoreFileList;
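/**
 * Test the load/update behavior of {@link StoreFileListFile}, including handling of truncated,
 * corrupted and concurrently written tracker files.
 */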
@Category({ RegionServerTests.class, SmallTests.class })
public class TestStoreFileListFile {

  @ClassRule
  public static final HBaseClassTestRule CLASS_RULE =
    HBaseClassTestRule.forClass(TestStoreFileListFile.class);

  private static final Logger LOG = LoggerFactory.getLogger(TestStoreFileListFile.class);

  private static final HBaseCommonTestingUtil UTIL = new HBaseCommonTestingUtil();

  private Path testDir;

  private StoreFileListFile storeFileListFile;

  @Rule
  public TestName name = new TestName();

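  /**
   * Creates a {@link StoreFileListFile} backed by a mocked {@link HRegionFileSystem}, so the
   * tracker reads and writes through the local test file system.
   */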
  private StoreFileListFile create() throws IOException {
    HRegionFileSystem hfs = mock(HRegionFileSystem.class);
    when(hfs.getFileSystem()).thenReturn(FileSystem.get(UTIL.getConfiguration()));
    StoreContext ctx = StoreContext.getBuilder().withFamilyStoreDirectoryPath(testDir)
      .withRegionFileSystem(hfs).build();
    return new StoreFileListFile(ctx);
  }

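  // Each test method gets its own data directory and a fresh tracker instance.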
  @Before
  public void setUp() throws IOException {
    testDir = UTIL.getDataTestDir(name.getMethodName());
    storeFileListFile = create();
  }

  @AfterClass
  public static void tearDown() {
    UTIL.cleanupTestDir();
  }

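  // Loading before any list has been written should return null rather than throw.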
  @Test
  public void testEmptyLoad() throws IOException {
    assertNull(storeFileListFile.load(false));
  }

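  /**
   * Returns the status of the single tracker file under the track file directory, assuming
   * exactly one has been written.
   */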
  private FileStatus getOnlyTrackerFile(FileSystem fs) throws IOException {
    return fs.listStatus(new Path(testDir, StoreFileListFile.TRACK_FILE_DIR))[0];
  }

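  /**
   * Reads the entire content of the given file into a byte array.
   */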
  private byte[] readAll(FileSystem fs, Path file) throws IOException {
    try (FSDataInputStream in = fs.open(file)) {
      return ByteStreams.toByteArray(in);
    }
  }

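  /**
   * Overwrites the given file with {@code len} bytes of {@code buf} starting at {@code off}.
   */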
  private void write(FileSystem fs, Path file, byte[] buf, int off, int len) throws IOException {
    try (FSDataOutputStream out = fs.create(file, true)) {
      out.write(buf, off, len);
    }
  }

  @Test
  public void testLoadPartial() throws IOException {
    StoreFileList.Builder builder = StoreFileList.newBuilder();
    storeFileListFile.update(builder);
    FileSystem fs = FileSystem.get(UTIL.getConfiguration());
    FileStatus trackerFileStatus = getOnlyTrackerFile(fs);
    // truncate it so we do not have enough data
    LOG.info("Truncate file {} with size {} to {}", trackerFileStatus.getPath(),
      trackerFileStatus.getLen(), trackerFileStatus.getLen() / 2);
    byte[] content = readAll(fs, trackerFileStatus.getPath());
    write(fs, trackerFileStatus.getPath(), content, 0, content.length / 2);
    assertNull(storeFileListFile.load(false));
  }

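  /**
   * Overwrites four bytes at {@code off} with the big-endian encoding of {@code value} produced
   * by {@link Bytes#toBytes(int)}.
   */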
  private void writeInt(byte[] buf, int off, int value) {
    byte[] b = Bytes.toBytes(value);
    for (int i = 0; i < 4; i++) {
      buf[off + i] = b[i];
    }
  }

  @Test
  public void testZeroFileLength() throws IOException {
    StoreFileList.Builder builder = StoreFileList.newBuilder();
    storeFileListFile.update(builder);
    FileSystem fs = FileSystem.get(UTIL.getConfiguration());
    FileStatus trackerFileStatus = getOnlyTrackerFile(fs);
    // overwrite the length prefix with zero, which should fail the load
    byte[] content = readAll(fs, trackerFileStatus.getPath());
    writeInt(content, 0, 0);
    write(fs, trackerFileStatus.getPath(), content, 0, content.length);
    assertThrows(IOException.class, () -> storeFileListFile.load(false));
  }

  @Test
  public void testBigFileLength() throws IOException {
    StoreFileList.Builder builder = StoreFileList.newBuilder();
    storeFileListFile.update(builder);
    FileSystem fs = FileSystem.get(UTIL.getConfiguration());
    FileStatus trackerFileStatus = getOnlyTrackerFile(fs);
    // write an absurdly large length (128MB), which should also fail the load
    byte[] content = readAll(fs, trackerFileStatus.getPath());
    writeInt(content, 0, 128 * 1024 * 1024);
    write(fs, trackerFileStatus.getPath(), content, 0, content.length);
    assertThrows(IOException.class, () -> storeFileListFile.load(false));
  }

  @Test
  public void testChecksumMismatch() throws IOException {
    StoreFileList.Builder builder = StoreFileList.newBuilder();
    storeFileListFile.update(builder);
    FileSystem fs = FileSystem.get(UTIL.getConfiguration());
    FileStatus trackerFileStatus = getOnlyTrackerFile(fs);
    // flip one byte so the stored checksum no longer matches the content
    byte[] content = readAll(fs, trackerFileStatus.getPath());
    content[5] = (byte) ~content[5];
    write(fs, trackerFileStatus.getPath(), content, 0, content.length);
    assertThrows(IOException.class, () -> storeFileListFile.load(false));
  }

  @Test
  public void testLoadNewerTrackFiles() throws IOException, InterruptedException {
    StoreFileList.Builder builder = StoreFileList.newBuilder();
    storeFileListFile.update(builder);

    FileSystem fs = FileSystem.get(UTIL.getConfiguration());
    FileStatus trackFileStatus = getOnlyTrackerFile(fs);

    builder.addStoreFile(StoreFileEntry.newBuilder().setName("hehe").setSize(10).build());
    storeFileListFile = create();
    storeFileListFile.update(builder);

    // should load the list we stored the second time
    storeFileListFile = create();
    StoreFileList list = storeFileListFile.load(true);
    assertEquals(1, list.getStoreFileCount());
    // since readOnly is true, we should not delete the old track file; the deletion happens in
    // the background, so we poll with UTIL.waitFor and make sure the file is still there after
    // the timeout, i.e., waitFor returns -1
    assertTrue(UTIL.waitFor(2000, 100, false, () -> !fs.exists(testDir)) < 0);

    // this time readOnly is false, so we should delete the old track file
    list = storeFileListFile.load(false);
    assertEquals(1, list.getStoreFileCount());
    UTIL.waitFor(5000, () -> !fs.exists(trackFileStatus.getPath()));
  }

  // This simulates the scenario where a 'dead' RS performs a flush or compaction on a region that
  // has already been reassigned to another RS. This is possible in the real world, usually caused
  // by a long stop-the-world GC.
  @Test
  public void testConcurrentUpdate() throws IOException {
    storeFileListFile.update(StoreFileList.newBuilder());

    StoreFileListFile storeFileListFile2 = create();
    storeFileListFile2.update(StoreFileList.newBuilder()
      .addStoreFile(StoreFileEntry.newBuilder().setName("hehe").setSize(10).build()));

    // let's update storeFileListFile several times
    for (int i = 0; i < 10; i++) {
      storeFileListFile.update(StoreFileList.newBuilder()
        .addStoreFile(StoreFileEntry.newBuilder().setName("haha-" + i).setSize(100 + i).build()));
    }

    // create a new list file, and make sure we load the list generated by storeFileListFile2
    StoreFileListFile storeFileListFile3 = create();
    StoreFileList fileList = storeFileListFile3.load(true);
    assertEquals(1, fileList.getStoreFileCount());
    StoreFileEntry entry = fileList.getStoreFile(0);
    assertEquals("hehe", entry.getName());
    assertEquals(10, entry.getSize());
  }
}