/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase;

import static org.junit.jupiter.api.Assertions.assertTrue;

import java.io.IOException;
import java.util.Arrays;
import java.util.List;
import java.util.concurrent.CompletableFuture;
import org.apache.hadoop.hbase.TestMetaTableAccessor.SpyingRpcScheduler;
import org.apache.hadoop.hbase.TestMetaTableAccessor.SpyingRpcSchedulerFactory;
import org.apache.hadoop.hbase.client.AsyncTable;
import org.apache.hadoop.hbase.client.Delete;
import org.apache.hadoop.hbase.client.Mutation;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.RegionInfo;
import org.apache.hadoop.hbase.client.RegionInfoBuilder;
import org.apache.hadoop.hbase.regionserver.HRegionServer;
import org.apache.hadoop.hbase.regionserver.RSRpcServices;
import org.apache.hadoop.hbase.testclassification.MediumTests;
import org.apache.hadoop.hbase.testclassification.MiscTests;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
import org.apache.hadoop.hbase.util.FutureUtils;
import org.junit.jupiter.api.AfterAll;
import org.junit.jupiter.api.BeforeAll;
import org.junit.jupiter.api.Tag;
import org.junit.jupiter.api.Test;

import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MultiRowMutationProtos.MultiRowMutationService;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MultiRowMutationProtos.MutateRowsRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MultiRowMutationProtos.MutateRowsResponse;

/**
 * Verifies end-to-end that mutations against the meta table are dispatched through the
 * region server's priority RPC queue. A spying RPC scheduler is installed on the cluster
 * so the test can observe the server-side priority-call counter before and after a
 * multi-row meta mutation.
 */
@Tag(MiscTests.TAG)
@Tag(MediumTests.TAG)
public class TestMetaUpdatesGoToPriorityQueue {

  private static final HBaseTestingUtil UTIL = new HBaseTestingUtil();

  @BeforeAll
  public static void beforeClass() throws Exception {
    // The verification must happen on the server side, so install a scheduler factory
    // that counts priority calls before the mini cluster starts.
    UTIL.getConfiguration().set(RSRpcServices.REGION_SERVER_RPC_SCHEDULER_FACTORY_CLASS,
      SpyingRpcSchedulerFactory.class.getName());
    UTIL.startMiniCluster();
  }

  @AfterAll
  public static void afterClass() throws Exception {
    UTIL.shutdownMiniCluster();
  }

  /**
   * Converts a client-side {@link Mutation} to its protobuf form.
   * @throws DoNotRetryIOException if the mutation is neither a {@link Put} nor a {@link Delete}
   */
  private ClientProtos.MutationProto toMutationProto(Mutation mutation) throws IOException {
    if (mutation instanceof Put) {
      return ProtobufUtil.toMutation(ClientProtos.MutationProto.MutationType.PUT, mutation);
    }
    if (mutation instanceof Delete) {
      return ProtobufUtil.toMutation(ClientProtos.MutationProto.MutationType.DELETE, mutation);
    }
    throw new DoNotRetryIOException(
      "multi in MetaEditor doesn't support " + mutation.getClass().getName());
  }

  /**
   * Applies the given mutations atomically to the meta table through the
   * multi-row-mutation coprocessor endpoint, anchored at {@code row}.
   */
  private void multiMutate(byte[] row, List<Mutation> mutations) throws IOException {
    MutateRowsRequest.Builder builder = MutateRowsRequest.newBuilder();
    for (Mutation m : mutations) {
      builder.addMutationRequest(toMutationProto(m));
    }
    MutateRowsRequest request = builder.build();
    AsyncTable<?> metaTable = UTIL.getAsyncConnection().getTable(TableName.META_TABLE_NAME);
    CompletableFuture<MutateRowsResponse> future = metaTable.<MultiRowMutationService,
      MutateRowsResponse> coprocessorService(MultiRowMutationService::newStub,
        (stub, controller, done) -> stub.mutateRows(controller, request, done), row);
    // Block until the coprocessor call completes, unwrapping any failure as IOException.
    FutureUtils.get(future);
  }

  @Test
  public void test() throws IOException, InterruptedException {
    TableName tableName = TableName.valueOf(getClass().getSimpleName());
    // Create a table, then hand-craft the meta rows a split would produce.
    UTIL.createTable(tableName, "cf1");
    UTIL.waitTableAvailable(tableName);
    RegionInfo parent = UTIL.getAdmin().getRegions(tableName).get(0);
    byte[] splitPoint = Bytes.toBytes("a");
    long regionId = 1000;
    RegionInfo daughterA = RegionInfoBuilder.newBuilder(parent.getTable())
      .setStartKey(parent.getStartKey()).setEndKey(splitPoint).setSplit(false)
      .setRegionId(regionId).build();
    RegionInfo daughterB = RegionInfoBuilder.newBuilder(parent.getTable())
      .setStartKey(splitPoint).setEndKey(parent.getEndKey()).setSplit(false)
      .setRegionId(regionId).build();

    // Locate the region server hosting meta and snapshot its priority-call counter.
    SingleProcessHBaseCluster cluster = UTIL.getMiniHBaseCluster();
    int metaServerIndex = cluster.getServerWithMeta();
    assertTrue(metaServerIndex >= 0);
    HRegionServer metaServer = cluster.getRegionServer(metaServerIndex);
    SpyingRpcScheduler scheduler = (SpyingRpcScheduler) metaServer.getRpcServer().getScheduler();
    long priorityCallsBefore = scheduler.numPriorityCalls;

    // Write the parent (offline, split) plus both daughters in one atomic meta update.
    long now = EnvironmentEdgeManager.currentTime();
    Put parentPut = MetaTableAccessor.makePutFromRegionInfo(
      RegionInfoBuilder.newBuilder(parent).setOffline(true).setSplit(true).build(), now);
    MetaTableAccessor.addDaughtersToPut(parentPut, daughterA, daughterB);
    Put putA = MetaTableAccessor.makePutFromRegionInfo(daughterA, now);
    Put putB = MetaTableAccessor.makePutFromRegionInfo(daughterB, now);
    multiMutate(parentPut.getRow(), Arrays.asList(parentPut, putA, putB));

    // The mutation must have gone through the priority queue, bumping the counter.
    assertTrue(priorityCallsBefore < scheduler.numPriorityCalls);
  }
}