/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase;

import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertTrue;

import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import java.util.NavigableMap;
import org.apache.hadoop.hbase.TimestampTestBase.FlushCache;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.testclassification.MediumTests;
import org.apache.hadoop.hbase.testclassification.MiscTests;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.Pair;
import org.junit.AfterClass;
import org.junit.Before;
import org.junit.BeforeClass;
import org.junit.ClassRule;
import org.junit.Rule;
import org.junit.Test;
import org.junit.experimental.categories.Category;
import org.junit.rules.TestName;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

/**
 * Port of the old TestScanMultipleVersions, TestTimestamp and TestGetRowVersions
 * tests from the old testing framework to {@link HBaseTestingUtility}.
 */
@Category({MiscTests.class, MediumTests.class})
public class TestMultiVersions {

  @ClassRule
  public static final HBaseClassTestRule CLASS_RULE =
      HBaseClassTestRule.forClass(TestMultiVersions.class);

  private static final Logger LOG = LoggerFactory.getLogger(TestMultiVersions.class);
  private static final HBaseTestingUtility UTIL = new HBaseTestingUtility();
  private Admin admin;

  private static final int NUM_SLAVES = 3;

  @Rule
  public TestName name = new TestName();

  @BeforeClass
  public static void setUpBeforeClass() throws Exception {
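    // Spin up one shared mini cluster (NUM_SLAVES region servers) used by every
    // test in this class.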
    UTIL.startMiniCluster(NUM_SLAVES);
  }

  @AfterClass
  public static void tearDownAfterClass() throws Exception {
    UTIL.shutdownMiniCluster();
  }

  @Before
  public void before()
      throws MasterNotRunningException, ZooKeeperConnectionException, IOException {
    this.admin = UTIL.getAdmin();
  }

  /**
   * Tests user-specifiable timestamps for puts, gets and scans. Also tests the
   * same in the presence of deletes. The test cores are written so they can be
   * run against both an HRegion and an HTable, i.e. both locally and remotely.
   *
   * <p>Port of the old TestTimestamp test so it can better utilize the spun-up
   * cluster by running more than a single test per spin-up. Keeps the old tests'
   * craziness.
   */
  @Test
  public void testTimestamps() throws Exception {
    HTableDescriptor desc = new HTableDescriptor(TableName.valueOf(name.getMethodName()));
    HColumnDescriptor hcd = new HColumnDescriptor(TimestampTestBase.FAMILY_NAME);
    hcd.setMaxVersions(3);
    desc.addFamily(hcd);
    this.admin.createTable(desc);
    Table table = UTIL.getConnection().getTable(desc.getTableName());
    // TODO: Remove these deprecated classes or pull them in here if this is the
    // only test using them.
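    // The FlushCache callback lets TimestampTestBase flush every region mid-test,
    // so the same checks are presumably exercised against flushed files as well as
    // the memstore.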
    TimestampTestBase.doTestDelete(table, new FlushCache() {
      @Override
      public void flushcache() throws IOException {
        UTIL.getHBaseCluster().flushcache();
      }
    });

    // Perhaps drop and re-add the table between tests so the former does
    // not pollute the latter?  Or put into separate tests.
    TimestampTestBase.doTestTimestampScanning(table, new FlushCache() {
      @Override
      public void flushcache() throws IOException {
        UTIL.getMiniHBaseCluster().flushcache();
      }
    });

    table.close();
  }

  /**
   * Verifies versions across a cluster restart.
   * Port of the old TestGetRowVersions test so it can better utilize the spun-up
   * cluster by running more than a single test per spin-up. Keeps the old tests'
   * craziness.
   */
  @Test
  public void testGetRowVersions() throws Exception {
    final byte [] contents = Bytes.toBytes("contents");
    final byte [] row = Bytes.toBytes("row");
    final byte [] value1 = Bytes.toBytes("value1");
    final byte [] value2 = Bytes.toBytes("value2");
    final long timestamp1 = 100L;
    final long timestamp2 = 200L;
    final HTableDescriptor desc = new HTableDescriptor(TableName.valueOf(name.getMethodName()));
    HColumnDescriptor hcd = new HColumnDescriptor(contents);
    hcd.setMaxVersions(3);
    desc.addFamily(hcd);
    this.admin.createTable(desc);
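    // The timestamp passed to the Put constructor becomes the default timestamp
    // for every cell added to it without an explicit timestamp.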
    Put put = new Put(row, timestamp1);
    put.addColumn(contents, contents, value1);
    Table table = UTIL.getConnection().getTable(desc.getTableName());
    table.put(put);
    // Shut down and restart the HBase cluster
    table.close();
    UTIL.shutdownMiniHBaseCluster();
    LOG.debug("HBase cluster shut down -- restarting");
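    // Only the HBase cluster is restarted; the mini DFS and ZooKeeper clusters stay
    // up, so the value written above must survive the restart.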
    StartMiniClusterOption option = StartMiniClusterOption.builder()
        .numRegionServers(NUM_SLAVES).build();
    UTIL.startMiniHBaseCluster(option);
    // Make a new connection.
    table = UTIL.getConnection().getTable(desc.getTableName());
    // Overwrite previous value
    put = new Put(row, timestamp2);
    put.addColumn(contents, contents, value2);
    table.put(put);
    // Now verify that getRow(row, column, latest) works
    Get get = new Get(row);
    // Should get one version by default
    Result r = table.get(get);
    assertNotNull(r);
    assertFalse(r.isEmpty());
    assertEquals(1, r.size());
    byte [] value = r.getValue(contents, contents);
    assertTrue(value.length != 0);
    assertTrue(Bytes.equals(value, value2));
    // Now check getRow with multiple versions
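    // setMaxVersions() with no argument asks for every stored version up to the
    // family's limit (3 here); only two versions were written, so two come back.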
    get = new Get(row);
    get.setMaxVersions();
    r = table.get(get);
    assertEquals(2, r.size());
    value = r.getValue(contents, contents);
    assertTrue(value.length != 0);
    assertTrue(Bytes.equals(value, value2));
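    // Result.getMap() exposes the full result keyed family -> qualifier -> timestamp
    // -> value, so both versions can be checked against the timestamps they were
    // written with.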
    NavigableMap<byte[], NavigableMap<byte[], NavigableMap<Long, byte[]>>> map =
      r.getMap();
    NavigableMap<byte[], NavigableMap<Long, byte[]>> familyMap =
      map.get(contents);
    NavigableMap<Long, byte[]> versionMap = familyMap.get(contents);
    assertEquals(2, versionMap.size());
    assertTrue(Bytes.equals(value1, versionMap.get(timestamp1)));
    assertTrue(Bytes.equals(value2, versionMap.get(timestamp2)));
    table.close();
  }

  /**
   * Port of the old TestScanMultipleVersions test so it can better utilize the
   * spun-up cluster by running more than a single test per spin-up. Keeps the old
   * tests' craziness.
   *
   * <p>Tests five cases of scans and timestamps.
   */
  @Test
  public void testScanMultipleVersions() throws Exception {
    final TableName tableName = TableName.valueOf(name.getMethodName());
    final HTableDescriptor desc = new HTableDescriptor(tableName);
    desc.addFamily(new HColumnDescriptor(HConstants.CATALOG_FAMILY));
    final byte [][] rows = new byte[][] {
      Bytes.toBytes("row_0200"),
      Bytes.toBytes("row_0800")
    };
    final byte [][] splitRows = new byte[][] {Bytes.toBytes("row_0500")};
    final long [] timestamp = new long[] {100L, 1000L};
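    // Create the table pre-split into two regions at row_0500 so the two test rows
    // land in different regions.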
    this.admin.createTable(desc, splitRows);
    Table table = UTIL.getConnection().getTable(tableName);
    // Assert we got the region layout wanted.
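    // getStartEndKeys() returns parallel arrays: entry i of the first array is
    // region i's start key and entry i of the second is its end key.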
    Pair<byte[][], byte[][]> keys = UTIL.getConnection()
        .getRegionLocator(tableName).getStartEndKeys();
    assertEquals(2, keys.getFirst().length);
    byte[][] startKeys = keys.getFirst();
    byte[][] endKeys = keys.getSecond();

    for (int i = 0; i < startKeys.length; i++) {
      if (i == 0) {
        assertTrue(Bytes.equals(HConstants.EMPTY_START_ROW, startKeys[i]));
        assertTrue(Bytes.equals(endKeys[i], splitRows[0]));
      } else if (i == 1) {
        assertTrue(Bytes.equals(splitRows[0], startKeys[i]));
        assertTrue(Bytes.equals(endKeys[i], HConstants.EMPTY_END_ROW));
      }
    }
    // Insert data
    List<Put> puts = new ArrayList<>();
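    // Each row gets one cell per timestamp: a null qualifier is stored as an empty
    // (zero-length) qualifier, and the cell value is the timestamp itself encoded
    // as bytes.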
    for (int i = 0; i < startKeys.length; i++) {
      for (int j = 0; j < timestamp.length; j++) {
        Put put = new Put(rows[i], timestamp[j]);
        put.addColumn(HConstants.CATALOG_FAMILY, null, timestamp[j], Bytes.toBytes(timestamp[j]));
        puts.add(put);
      }
    }
    table.put(puts);
    // There are 5 cases we have to test. Each is described below.
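    // First, sanity-check that a Get pinned to a single timestamp returns exactly
    // one cell per row.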
    for (int i = 0; i < rows.length; i++) {
      for (int j = 0; j < timestamp.length; j++) {
        Get get = new Get(rows[i]);
        get.addFamily(HConstants.CATALOG_FAMILY);
        get.setTimestamp(timestamp[j]);
        Result result = table.get(get);
        int cellCount = 0;
        for (@SuppressWarnings("unused") Cell kv : result.listCells()) {
          cellCount++;
        }
        assertEquals(1, cellCount);
      }
    }

    // Case 1: scan with LATEST_TIMESTAMP. Should get two rows
    int count = 0;
    Scan scan = new Scan();
    scan.addFamily(HConstants.CATALOG_FAMILY);
    ResultScanner s = table.getScanner(scan);
    try {
      for (Result rr = null; (rr = s.next()) != null;) {
        System.out.println(rr.toString());
        count += 1;
      }
      assertEquals("Number of rows should be 2", 2, count);
    } finally {
      s.close();
    }

    // Case 2: Scan with a time range starting at the most recent timestamp
    // ([1000, Long.MAX_VALUE); setTimeRange's minimum is inclusive, so the cells
    // written at 1000 still match). Should get 2 rows.

    count = 0;
    scan = new Scan();
    scan.setTimeRange(1000L, Long.MAX_VALUE);
    scan.addFamily(HConstants.CATALOG_FAMILY);

    s = table.getScanner(scan);
    try {
      while (s.next() != null) {
        count += 1;
      }
      assertEquals("Number of rows should be 2", 2, count);
    } finally {
      s.close();
    }

    // Case 3: scan with a timestamp equal to the most recent timestamp
    // (in this case == 1000). Should get 2 rows.

    count = 0;
    scan = new Scan();
    scan.setTimestamp(1000L);
    scan.addFamily(HConstants.CATALOG_FAMILY);

    s = table.getScanner(scan);
    try {
      while (s.next() != null) {
        count += 1;
      }
      assertEquals("Number of rows should be 2", 2, count);
    } finally {
      s.close();
    }

    // Case 4: scan with a time range covering the first timestamp but not the
    // second ([100, 1000); setTimeRange's maximum is exclusive). Should get 2 rows.

    count = 0;
    scan = new Scan();
    scan.setTimeRange(100L, 1000L);
    scan.addFamily(HConstants.CATALOG_FAMILY);

    s = table.getScanner(scan);
    try {
      while (s.next() != null) {
        count += 1;
      }
      assertEquals("Number of rows should be 2", 2, count);
    } finally {
      s.close();
    }

    // Case 5: scan with a timestamp equal to the first timestamp (100).
    // Should get 2 rows.

    count = 0;
    scan = new Scan();
    scan.setTimestamp(100L);
    scan.addFamily(HConstants.CATALOG_FAMILY);

    s = table.getScanner(scan);
    try {
      while (s.next() != null) {
        count += 1;
      }
      assertEquals("Number of rows should be 2", 2, count);
    } finally {
      s.close();
    }
  }

}