/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.regionserver;

import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;

import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseClassTestRule;
import org.apache.hadoop.hbase.HBaseTestingUtil;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.RegionInfo;
import org.apache.hadoop.hbase.client.RegionInfoBuilder;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.master.assignment.SplitTableRegionProcedure;
import org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv;
import org.apache.hadoop.hbase.procedure2.Procedure;
import org.apache.hadoop.hbase.testclassification.LargeTests;
import org.apache.hadoop.hbase.testclassification.RegionServerTests;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
import org.junit.AfterClass;
import org.junit.BeforeClass;
import org.junit.ClassRule;
import org.junit.Rule;
import org.junit.Test;
import org.junit.experimental.categories.Category;
import org.junit.rules.TestName;

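/**
 * Tests that store file split and merge artifacts are written directly under the table
 * directory (rather than a temporary directory) by exercising HRegionFileSystem's
 * splitStoreFile, mergeStoreFile, commitDaughterRegion and commitMergedRegion methods.
 */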
@Category({ RegionServerTests.class, LargeTests.class })
public class TestDirectStoreSplitsMerges {

  @ClassRule
  public static final HBaseClassTestRule CLASS_RULE =
    HBaseClassTestRule.forClass(TestDirectStoreSplitsMerges.class);

  private static final HBaseTestingUtil TEST_UTIL = new HBaseTestingUtil();

  public static final byte[] FAMILY_NAME = Bytes.toBytes("info");

  @Rule
  public TestName name = new TestName();

  @BeforeClass
  public static void setup() throws Exception {
    TEST_UTIL.startMiniCluster();
  }

  @AfterClass
  public static void after() throws Exception {
    TEST_UTIL.shutdownMiniCluster();
  }

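  /**
   * Splits a store file into a reference for one daughter region and verifies both the
   * reference file naming and that the daughter region directory is created directly under
   * the table directory, not under a temporary (.tmp) directory.
   */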
  @Test
  public void testSplitStoreDir() throws Exception {
    TableName table = TableName.valueOf(name.getMethodName());
    TEST_UTIL.createTable(table, FAMILY_NAME);
    // first put some data in order to have a store file created
    putThreeRowsAndFlush(table);
    HRegion region = TEST_UTIL.getHBaseCluster().getRegions(table).get(0);
    HRegionFileSystem regionFS = region.getStores().get(0).getRegionFileSystem();
    RegionInfo daughterA =
      RegionInfoBuilder.newBuilder(table).setStartKey(region.getRegionInfo().getStartKey())
        .setEndKey(Bytes.toBytes("002")).setSplit(false)
        .setRegionId(region.getRegionInfo().getRegionId() + EnvironmentEdgeManager.currentTime())
        .build();
    HStoreFile file = (HStoreFile) region.getStore(FAMILY_NAME).getStorefiles().toArray()[0];
    Path result = regionFS.splitStoreFile(daughterA, Bytes.toString(FAMILY_NAME), file,
      Bytes.toBytes("002"), false, region.getSplitPolicy());
    // asserts the reference file naming is correct
    validateResultingFile(region.getRegionInfo().getEncodedName(), result);
    // additionally check that the split region dir was created directly under the table dir,
    // not under .tmp
    Path resultGreatGrandParent = result.getParent().getParent().getParent();
    assertEquals(regionFS.getTableDir().getName(), resultGreatGrandParent.getName());
  }

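  /**
   * Splits the table, creates a region file system for a would-be merged region, then verifies
   * that store files from both parent regions can be referenced from it via mergeStoreFile.
   */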
  @Test
  public void testMergeStoreFile() throws Exception {
    TableName table = TableName.valueOf(name.getMethodName());
    TEST_UTIL.createTable(table, FAMILY_NAME);
    // splitting the table first
    TEST_UTIL.getAdmin().split(table, Bytes.toBytes("002"));
    waitForSplitProcComplete(1000, 10);
    // Add data and flush to create files in the two different regions
    putThreeRowsAndFlush(table);
    List<HRegion> regions = TEST_UTIL.getHBaseCluster().getRegions(table);
    HRegion first = regions.get(0);
    HRegion second = regions.get(1);
    HRegionFileSystem regionFS = first.getRegionFileSystem();

    RegionInfo mergeResult =
      RegionInfoBuilder.newBuilder(table).setStartKey(first.getRegionInfo().getStartKey())
        .setEndKey(second.getRegionInfo().getEndKey()).setSplit(false)
        .setRegionId(first.getRegionInfo().getRegionId() + EnvironmentEdgeManager.currentTime())
        .build();

    HRegionFileSystem mergeRegionFs = HRegionFileSystem.createRegionOnFileSystem(
      TEST_UTIL.getHBaseCluster().getMaster().getConfiguration(), regionFS.getFileSystem(),
      regionFS.getTableDir(), mergeResult);

    // merge file from first region
    HStoreFile file = (HStoreFile) first.getStore(FAMILY_NAME).getStorefiles().toArray()[0];
    mergeFileFromRegion(mergeRegionFs, first, file);
    // merge file from second region
    file = (HStoreFile) second.getStore(FAMILY_NAME).getStorefiles().toArray()[0];
    mergeFileFromRegion(mergeRegionFs, second, file);
  }

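  /**
   * Commits a daughter region with an empty list of store files and verifies that
   * commitDaughterRegion still returns the daughter's split directory.
   */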
  @Test
  public void testCommitDaughterRegionNoFiles() throws Exception {
    TableName table = TableName.valueOf(name.getMethodName());
    TEST_UTIL.createTable(table, FAMILY_NAME);
    HRegion region = TEST_UTIL.getHBaseCluster().getRegions(table).get(0);
    HRegionFileSystem regionFS = region.getStores().get(0).getRegionFileSystem();
    RegionInfo daughterA =
      RegionInfoBuilder.newBuilder(table).setStartKey(region.getRegionInfo().getStartKey())
        .setEndKey(Bytes.toBytes("002")).setSplit(false)
        .setRegionId(region.getRegionInfo().getRegionId() + EnvironmentEdgeManager.currentTime())
        .build();
    Path splitDir = regionFS.getSplitsDir(daughterA);
    MasterProcedureEnv env =
      TEST_UTIL.getMiniHBaseCluster().getMaster().getMasterProcedureExecutor().getEnvironment();
    Path result = regionFS.commitDaughterRegion(daughterA, new ArrayList<>(), env);
    assertEquals(splitDir, result);
  }

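  /**
   * Splits a store file into references for both daughter regions and verifies that
   * commitDaughterRegion returns each daughter's split directory.
   */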
  @Test
  public void testCommitDaughterRegionWithFiles() throws Exception {
    TableName table = TableName.valueOf(name.getMethodName());
    TEST_UTIL.createTable(table, FAMILY_NAME);
    // first put some data in order to have a store file created
    putThreeRowsAndFlush(table);
    HRegion region = TEST_UTIL.getHBaseCluster().getRegions(table).get(0);
    HRegionFileSystem regionFS = region.getStores().get(0).getRegionFileSystem();
    RegionInfo daughterA =
      RegionInfoBuilder.newBuilder(table).setStartKey(region.getRegionInfo().getStartKey())
        .setEndKey(Bytes.toBytes("002")).setSplit(false)
        .setRegionId(region.getRegionInfo().getRegionId() + EnvironmentEdgeManager.currentTime())
        .build();
    RegionInfo daughterB = RegionInfoBuilder.newBuilder(table).setStartKey(Bytes.toBytes("002"))
      .setEndKey(region.getRegionInfo().getEndKey()).setSplit(false)
      .setRegionId(region.getRegionInfo().getRegionId()).build();
    Path splitDirA = regionFS.getSplitsDir(daughterA);
    Path splitDirB = regionFS.getSplitsDir(daughterB);
    HStoreFile file = (HStoreFile) region.getStore(FAMILY_NAME).getStorefiles().toArray()[0];
    List<Path> filesA = new ArrayList<>();
    filesA.add(regionFS.splitStoreFile(daughterA, Bytes.toString(FAMILY_NAME), file,
      Bytes.toBytes("002"), false, region.getSplitPolicy()));
    List<Path> filesB = new ArrayList<>();
    filesB.add(regionFS.splitStoreFile(daughterB, Bytes.toString(FAMILY_NAME), file,
      Bytes.toBytes("002"), true, region.getSplitPolicy()));
    MasterProcedureEnv env =
      TEST_UTIL.getMiniHBaseCluster().getMaster().getMasterProcedureExecutor().getEnvironment();
    Path resultA = regionFS.commitDaughterRegion(daughterA, filesA, env);
    Path resultB = regionFS.commitDaughterRegion(daughterB, filesB, env);
    assertEquals(splitDirA, resultA);
    assertEquals(splitDirB, resultB);
  }

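  /**
   * Splits the table, references store files from both resulting regions into a new merged
   * region file system and verifies that commitMergedRegion completes without error.
   */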
  @Test
  public void testCommitMergedRegion() throws Exception {
    TableName table = TableName.valueOf(name.getMethodName());
    TEST_UTIL.createTable(table, FAMILY_NAME);
    // splitting the table first
    TEST_UTIL.getAdmin().split(table, Bytes.toBytes("002"));
    waitForSplitProcComplete(1000, 10);
    // Add data and flush to create files in the two different regions
    putThreeRowsAndFlush(table);
    List<HRegion> regions = TEST_UTIL.getHBaseCluster().getRegions(table);
    HRegion first = regions.get(0);
    HRegion second = regions.get(1);
    HRegionFileSystem regionFS = first.getRegionFileSystem();

    RegionInfo mergeResult =
      RegionInfoBuilder.newBuilder(table).setStartKey(first.getRegionInfo().getStartKey())
        .setEndKey(second.getRegionInfo().getEndKey()).setSplit(false)
        .setRegionId(first.getRegionInfo().getRegionId() + EnvironmentEdgeManager.currentTime())
        .build();

    HRegionFileSystem mergeRegionFs = HRegionFileSystem.createRegionOnFileSystem(
      TEST_UTIL.getHBaseCluster().getMaster().getConfiguration(), regionFS.getFileSystem(),
      regionFS.getTableDir(), mergeResult);

    // merge file from first region
    HStoreFile file = (HStoreFile) first.getStore(FAMILY_NAME).getStorefiles().toArray()[0];
    mergeFileFromRegion(mergeRegionFs, first, file);
    // merge file from second region
    file = (HStoreFile) second.getStore(FAMILY_NAME).getStorefiles().toArray()[0];
    List<Path> mergedFiles = new ArrayList<>();
    mergedFiles.add(mergeFileFromRegion(mergeRegionFs, second, file));
    MasterProcedureEnv env =
      TEST_UTIL.getMiniHBaseCluster().getMaster().getMasterProcedureExecutor().getEnvironment();
    mergeRegionFs.commitMergedRegion(mergedFiles, env);
  }

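  /**
   * Waits for a pending SplitTableRegionProcedure to finish, polling up to the given number of
   * attempts and waiting waitTime milliseconds per attempt, then asserts it succeeded.
   */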
  private void waitForSplitProcComplete(int attempts, int waitTime) throws Exception {
    List<Procedure<?>> procedures = TEST_UTIL.getHBaseCluster().getMaster().getProcedures();
    if (procedures.size() > 0) {
      Procedure<?> splitProc =
        procedures.stream().filter(p -> p instanceof SplitTableRegionProcedure).findFirst().get();
      int count = 0;
      while ((splitProc.isWaiting() || splitProc.isRunnable()) && count < attempts) {
        synchronized (splitProc) {
          splitProc.wait(waitTime);
        }
        count++;
      }
      assertTrue(splitProc.isSuccess());
    }
  }

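  /**
   * References the given store file of regionToMerge from the merged region's file system and
   * validates the name of the resulting file.
   */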
  private Path mergeFileFromRegion(HRegionFileSystem regionFS, HRegion regionToMerge,
    HStoreFile file) throws IOException {
    Path mergedFile =
      regionFS.mergeStoreFile(regionToMerge.getRegionInfo(), Bytes.toString(FAMILY_NAME), file);
    validateResultingFile(regionToMerge.getRegionInfo().getEncodedName(), mergedFile);
    return mergedFile;
  }

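  /**
   * Asserts that the resulting reference file encodes the original region's name and sits
   * directly under the column family directory.
   */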
  private void validateResultingFile(String originalRegion, Path result) {
    assertEquals(originalRegion, result.getName().split("\\.")[1]);
    // asserts we are under the cf directory
    Path resultParent = result.getParent();
    assertEquals(Bytes.toString(FAMILY_NAME), resultParent.getName());
  }

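  /**
   * Puts three rows ("001", "002" and "003") into the given table and flushes it so that a
   * store file is created on disk.
   */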
  private void putThreeRowsAndFlush(TableName table) throws IOException {
    // use try-with-resources so the Table instance is closed once the puts are done
    try (Table tbl = TEST_UTIL.getConnection().getTable(table)) {
      byte[] qualifier = Bytes.toBytes("1");
      Put put = new Put(Bytes.toBytes("001"));
      put.addColumn(FAMILY_NAME, qualifier, Bytes.toBytes(1));
      tbl.put(put);
      put = new Put(Bytes.toBytes("002"));
      put.addColumn(FAMILY_NAME, qualifier, Bytes.toBytes(2));
      tbl.put(put);
      put = new Put(Bytes.toBytes("003"));
      put.addColumn(FAMILY_NAME, qualifier, Bytes.toBytes(3));
      tbl.put(put);
    }
    TEST_UTIL.flush(table);
  }
}