001/*
002 * Licensed to the Apache Software Foundation (ASF) under one
003 * or more contributor license agreements.  See the NOTICE file
004 * distributed with this work for additional information
005 * regarding copyright ownership.  The ASF licenses this file
006 * to you under the Apache License, Version 2.0 (the
007 * "License"); you may not use this file except in compliance
008 * with the License.  You may obtain a copy of the License at
009 *
010 *     http://www.apache.org/licenses/LICENSE-2.0
011 *
012 * Unless required by applicable law or agreed to in writing, software
013 * distributed under the License is distributed on an "AS IS" BASIS,
014 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
015 * See the License for the specific language governing permissions and
016 * limitations under the License.
017 */
018package org.apache.hadoop.hbase.client;
019
import static org.apache.hadoop.hbase.client.trace.hamcrest.SpanDataMatchers.hasEnded;
import static org.apache.hadoop.hbase.client.trace.hamcrest.SpanDataMatchers.hasException;
import static org.apache.hadoop.hbase.client.trace.hamcrest.SpanDataMatchers.hasName;
import static org.apache.hadoop.hbase.client.trace.hamcrest.SpanDataMatchers.hasParentSpanId;
import static org.apache.hadoop.hbase.client.trace.hamcrest.SpanDataMatchers.hasStatusWithCode;
import static org.hamcrest.Matchers.allOf;
import static org.hamcrest.Matchers.startsWith;

import io.opentelemetry.api.trace.StatusCode;
import io.opentelemetry.sdk.trace.data.SpanData;
import java.util.ArrayList;
import java.util.Comparator;
import java.util.List;
import java.util.function.Supplier;
import java.util.stream.Collectors;
import java.util.stream.Stream;
import org.apache.hadoop.hbase.HBaseParameterizedTestTemplate;
import org.apache.hadoop.hbase.client.trace.StringTraceRenderer;
import org.apache.hadoop.hbase.testclassification.ClientTests;
import org.apache.hadoop.hbase.testclassification.LargeTests;
import org.hamcrest.Matcher;
import org.junit.jupiter.api.Tag;
import org.junit.jupiter.params.provider.Arguments;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
044
045@Tag(LargeTests.TAG)
046@Tag(ClientTests.TAG)
047@HBaseParameterizedTestTemplate(name = "{index}: table={0}, scan={2}")
048public class TestAsyncTableScanner extends AbstractTestAsyncTableScan {
049  private static final Logger logger = LoggerFactory.getLogger(TestAsyncTableScanner.class);
050
051  private Supplier<AsyncTable<?>> getTable;
052
053  private Supplier<Scan> scanCreator;
054
055  // tableType and scanType are only for displaying
056  public TestAsyncTableScanner(String tableType, Supplier<AsyncTable<?>> getTable, String scanType,
057    Supplier<Scan> scanCreator) {
058    this.getTable = getTable;
059    this.scanCreator = scanCreator;
060  }
061
062  public static Stream<Arguments> parameters() {
063    return getTableAndScanCreatorParams();
064  }
065
066  @Override
067  protected Scan createScan() {
068    return scanCreator.get();
069  }
070
071  @Override
072  protected List<Result> doScan(Scan scan, int closeAfter) throws Exception {
073    AsyncTable<?> table = getTable.get();
074    List<Result> results = new ArrayList<>();
075    // these tests batch settings with the sample data result in each result being
076    // split in two. so we must allow twice the expected results in order to reach
077    // our true limit. see convertFromBatchResult for details.
078    if (closeAfter > 0 && scan.getBatch() > 0) {
079      closeAfter = closeAfter * 2;
080    }
081    try (ResultScanner scanner = table.getScanner(scan)) {
082      for (Result result; (result = scanner.next()) != null;) {
083        results.add(result);
084        if (closeAfter > 0 && results.size() >= closeAfter) {
085          break;
086        }
087      }
088    }
089    if (scan.getBatch() > 0) {
090      results = convertFromBatchResult(results);
091    }
092    return results;
093  }
094
095  @Override
096  protected void assertTraceContinuity() {
097    final String parentSpanName = methodName;
098    final Matcher<SpanData> parentSpanMatcher =
099      allOf(hasName(parentSpanName), hasStatusWithCode(StatusCode.OK), hasEnded());
100    waitForSpan(parentSpanMatcher);
101
102    if (logger.isDebugEnabled()) {
103      StringTraceRenderer stringTraceRenderer =
104        new StringTraceRenderer(spanStream().collect(Collectors.toList()));
105      stringTraceRenderer.render(logger::debug);
106    }
107
108    final String parentSpanId = spanStream().filter(parentSpanMatcher::matches)
109      .max((a, b) -> Long.compare(a.getEndEpochNanos(), b.getEndEpochNanos()))
110      .map(SpanData::getSpanId).get();
111
112    waitForSpan(allOf(hasName(startsWith("SCAN " + TABLE_NAME.getNameWithNamespaceInclAsString())),
113      hasParentSpanId(parentSpanId), hasStatusWithCode(StatusCode.OK), hasEnded()));
114  }
115
116  @Override
117  protected void
118    assertTraceError(Matcher<io.opentelemetry.api.common.Attributes> exceptionMatcher) {
119    final String parentSpanName = methodName;
120    final Matcher<SpanData> parentSpanMatcher = allOf(hasName(parentSpanName), hasEnded());
121    waitForSpan(parentSpanMatcher);
122
123    if (logger.isDebugEnabled()) {
124      StringTraceRenderer stringTraceRenderer =
125        new StringTraceRenderer(spanStream().collect(Collectors.toList()));
126      stringTraceRenderer.render(logger::debug);
127    }
128
129    final String parentSpanId = spanStream().filter(parentSpanMatcher::matches)
130      .max((a, b) -> Long.compare(a.getEndEpochNanos(), b.getEndEpochNanos()))
131      .map(SpanData::getSpanId).get();
132
133    final Matcher<SpanData> scanOperationSpanMatcher =
134      allOf(hasName(startsWith("SCAN " + TABLE_NAME.getNameWithNamespaceInclAsString())),
135        hasParentSpanId(parentSpanId), hasStatusWithCode(StatusCode.ERROR),
136        hasException(exceptionMatcher), hasEnded());
137    waitForSpan(scanOperationSpanMatcher);
138  }
139}