/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase;

import static org.junit.jupiter.api.Assertions.assertArrayEquals;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertFalse;
import static org.junit.jupiter.api.Assertions.assertNotEquals;
import static org.junit.jupiter.api.Assertions.assertNotNull;
import static org.junit.jupiter.api.Assertions.assertTrue;

import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import java.util.NavigableMap;
import org.apache.hadoop.hbase.TimestampTestBase.FlushCache;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.testclassification.MediumTests;
import org.apache.hadoop.hbase.testclassification.MiscTests;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.Pair;
import org.junit.jupiter.api.AfterAll;
import org.junit.jupiter.api.BeforeAll;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Tag;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.TestInfo;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import org.apache.hbase.thirdparty.com.google.common.collect.Iterables;

/**
 * Port of old TestScanMultipleVersions, TestTimestamp and TestGetRowVersions from old testing
 * framework to {@link HBaseTestingUtil}.
 */
@Tag(MiscTests.TAG)
@Tag(MediumTests.TAG)
public class TestMultiVersions {

  private static final Logger LOG = LoggerFactory.getLogger(TestMultiVersions.class);
  private static final HBaseTestingUtil UTIL = new HBaseTestingUtil();
  // Refreshed before each test from the shared mini-cluster connection.
  private Admin admin;

  private static final int NUM_SLAVES = 3;

  @BeforeAll
  public static void setUpBeforeClass() throws Exception {
    UTIL.startMiniCluster(NUM_SLAVES);
  }

  @AfterAll
  public static void tearDownAfterClass() throws Exception {
    UTIL.shutdownMiniCluster();
  }

  // MasterNotRunningException and ZooKeeperConnectionException both extend IOException,
  // so declaring them separately was redundant.
  @BeforeEach
  public void before() throws IOException {
    this.admin = UTIL.getAdmin();
  }

  /**
   * Counts the rows returned by the given scan, closing the scanner when done.
   * @param table table to scan
   * @param scan  scan specification (family/timestamp constraints already applied)
   * @return number of rows the scan produced
   */
  private static int countRows(Table table, Scan scan) throws IOException {
    try (ResultScanner scanner = table.getScanner(scan)) {
      return Iterables.size(scanner);
    }
  }

  /**
   * Tests user specifiable time stamps putting, getting and scanning. Also tests same in presence
   * of deletes. Test cores are written so can be run against an HRegion and against an HTable: i.e.
   * both local and remote.
   * <p>
   * Port of old TestTimestamp test to here so can better utilize the spun up cluster running more
   * than a single test per spin up. Keep old tests' craziness.
   */
  @Test
  public void testTimestamps(TestInfo testInfo) throws Exception {
    TableDescriptor tableDescriptor =
      TableDescriptorBuilder.newBuilder(TableName.valueOf(testInfo.getTestMethod().get().getName()))
        .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(TimestampTestBase.FAMILY_NAME)
          .setMaxVersions(3).build())
        .build();
    this.admin.createTable(tableDescriptor);
    // try-with-resources guarantees the table is closed even if an assertion fails.
    try (Table table = UTIL.getConnection().getTable(tableDescriptor.getTableName())) {
      // TODO: Remove these deprecated classes or pull them in here if this is
      // only test using them. FlushCache has a single abstract method, so lambdas suffice.
      TimestampTestBase.doTestDelete(table, () -> UTIL.getHBaseCluster().flushcache());

      // Perhaps drop and re-add the table between tests so the former does
      // not pollute this latter? Or put into separate tests.
      TimestampTestBase.doTestTimestampScanning(table, () -> UTIL.getMiniHBaseCluster().flushcache());
    }
  }

  /**
   * Verifies versions across a cluster restart.
   * <p/>
   * Port of old TestGetRowVersions test to here so can better utilize the spun up cluster running
   * more than a single test per spin up. Keep old tests' craziness.
   */
  @Test
  public void testGetRowVersions(TestInfo testInfo) throws Exception {
    final byte[] contents = Bytes.toBytes("contents");
    final byte[] row = Bytes.toBytes("row");
    final byte[] value1 = Bytes.toBytes("value1");
    final byte[] value2 = Bytes.toBytes("value2");
    final long timestamp1 = 100L;
    final long timestamp2 = 200L;
    TableDescriptor tableDescriptor = TableDescriptorBuilder
      .newBuilder(TableName.valueOf(testInfo.getTestMethod().get().getName()))
      .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(contents).setMaxVersions(3).build())
      .build();
    this.admin.createTable(tableDescriptor);
    Put put = new Put(row, timestamp1);
    put.addColumn(contents, contents, value1);
    // Write the first version, then close the table before the cluster restart.
    try (Table table = UTIL.getConnection().getTable(tableDescriptor.getTableName())) {
      table.put(put);
    }
    // Shut down and restart the HBase cluster
    UTIL.shutdownMiniHBaseCluster();
    LOG.debug("HBase cluster shut down -- restarting");
    StartTestingClusterOption option =
      StartTestingClusterOption.builder().numRegionServers(NUM_SLAVES).build();
    UTIL.startMiniHBaseCluster(option);
    // Make a new connection.
    try (Table table = UTIL.getConnection().getTable(tableDescriptor.getTableName())) {
      // Overwrite previous value
      put = new Put(row, timestamp2);
      put.addColumn(contents, contents, value2);
      table.put(put);
      // Now verify that getRow(row, column, latest) works
      Get get = new Get(row);
      // Should get one version by default
      Result r = table.get(get);
      assertNotNull(r);
      assertFalse(r.isEmpty());
      assertEquals(1, r.size());
      byte[] value = r.getValue(contents, contents);
      assertNotEquals(0, value.length);
      assertTrue(Bytes.equals(value, value2));
      // Now check getRow with multiple versions
      get = new Get(row);
      get.readAllVersions();
      r = table.get(get);
      assertEquals(2, r.size());
      value = r.getValue(contents, contents);
      assertNotEquals(0, value.length);
      // JUnit 5 assertArrayEquals takes (expected, actual); the latest version must win.
      assertArrayEquals(value2, value);
      NavigableMap<byte[], NavigableMap<byte[], NavigableMap<Long, byte[]>>> map = r.getMap();
      NavigableMap<byte[], NavigableMap<Long, byte[]>> familyMap = map.get(contents);
      NavigableMap<Long, byte[]> versionMap = familyMap.get(contents);
      assertEquals(2, versionMap.size());
      assertArrayEquals(value1, versionMap.get(timestamp1));
      assertArrayEquals(value2, versionMap.get(timestamp2));
    }
  }

  /**
   * Port of old TestScanMultipleVersions test here so can better utilize the spun up cluster
   * running more than just a single test. Keep old tests craziness.
   * <p>
   * Tests five cases of scans and timestamps.
   */
  @Test
  public void testScanMultipleVersions(TestInfo testInfo) throws Exception {
    final TableName tableName = TableName.valueOf(testInfo.getTestMethod().get().getName());
    TableDescriptor tableDescriptor = TableDescriptorBuilder.newBuilder(tableName)
      .setColumnFamily(ColumnFamilyDescriptorBuilder.of(HConstants.CATALOG_FAMILY)).build();

    final byte[][] rows = new byte[][] { Bytes.toBytes("row_0200"), Bytes.toBytes("row_0800") };
    final byte[][] splitRows = new byte[][] { Bytes.toBytes("row_0500") };
    final long[] timestamp = new long[] { 100L, 1000L };
    this.admin.createTable(tableDescriptor, splitRows);
    // The original leaked this Table; try-with-resources closes it on every exit path.
    try (Table table = UTIL.getConnection().getTable(tableName)) {
      // Assert we got the region layout wanted.
      Pair<byte[][], byte[][]> keys =
        UTIL.getConnection().getRegionLocator(tableName).getStartEndKeys();
      assertEquals(2, keys.getFirst().length);
      byte[][] startKeys = keys.getFirst();
      byte[][] endKeys = keys.getSecond();

      // Region 0: [empty, split); region 1: [split, empty). Expected value goes first.
      for (int i = 0; i < startKeys.length; i++) {
        if (i == 0) {
          assertArrayEquals(HConstants.EMPTY_START_ROW, startKeys[i]);
          assertArrayEquals(splitRows[0], endKeys[i]);
        } else if (i == 1) {
          assertArrayEquals(splitRows[0], startKeys[i]);
          assertArrayEquals(HConstants.EMPTY_END_ROW, endKeys[i]);
        }
      }
      // Insert data: one row per region, two timestamped versions per row.
      List<Put> puts = new ArrayList<>();
      for (int i = 0; i < startKeys.length; i++) {
        for (int j = 0; j < timestamp.length; j++) {
          Put put = new Put(rows[i], timestamp[j]);
          put.addColumn(HConstants.CATALOG_FAMILY, null, timestamp[j], Bytes.toBytes(timestamp[j]));
          puts.add(put);
        }
      }
      table.put(puts);
      // There are 5 cases we have to test. Each is described below.
      // First sanity-check: every (row, timestamp) pair yields exactly one cell.
      for (int i = 0; i < rows.length; i++) {
        for (int j = 0; j < timestamp.length; j++) {
          Get get = new Get(rows[i]);
          get.addFamily(HConstants.CATALOG_FAMILY);
          get.setTimestamp(timestamp[j]);
          Result result = table.get(get);
          assertEquals(1, result.rawCells().length);
        }
      }

      // Case 1: scan with LATEST_TIMESTAMP. Should get two rows
      Scan scan = new Scan();
      scan.addFamily(HConstants.CATALOG_FAMILY);
      assertEquals(2, countRows(table, scan), "Number of rows should be 2");

      // Case 2: Scan with a timestamp greater than most recent timestamp
      // (in this case > 1000 and < LATEST_TIMESTAMP. Should get 2 rows.
      scan = new Scan();
      scan.setTimeRange(1000L, Long.MAX_VALUE);
      scan.addFamily(HConstants.CATALOG_FAMILY);
      assertEquals(2, countRows(table, scan), "Number of rows should be 2");

      // Case 3: scan with timestamp equal to most recent timestamp
      // (in this case == 1000. Should get 2 rows.
      scan = new Scan();
      scan.setTimestamp(1000L);
      scan.addFamily(HConstants.CATALOG_FAMILY);
      assertEquals(2, countRows(table, scan), "Number of rows should be 2");

      // Case 4: scan with timestamp greater than first timestamp but less than
      // second timestamp (100 < timestamp < 1000). Should get 2 rows.
      scan = new Scan();
      scan.setTimeRange(100L, 1000L);
      scan.addFamily(HConstants.CATALOG_FAMILY);
      assertEquals(2, countRows(table, scan), "Number of rows should be 2");

      // Case 5: scan with timestamp equal to first timestamp (100)
      // Should get 2 rows.
      scan = new Scan();
      scan.setTimestamp(100L);
      scan.addFamily(HConstants.CATALOG_FAMILY);
      assertEquals(2, countRows(table, scan), "Number of rows should be 2");
    }
  }

}