/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase;

import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;

import java.util.UUID;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.testclassification.MediumTests;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.junit.ClassRule;
import org.junit.Rule;
import org.junit.Test;
import org.junit.experimental.categories.Category;
import org.junit.rules.TestName;

/**
 * Test that an HBase cluster can run on top of an existing MiniDFSCluster.
 */
@Category(MediumTests.class)
public class TestHBaseOnOtherDfsCluster {

  @ClassRule
  public static final HBaseClassTestRule CLASS_RULE =
      HBaseClassTestRule.forClass(TestHBaseOnOtherDfsCluster.class);

  @Rule
  public TestName name = new TestName();

  @Test
  public void testOverlayOnOtherCluster() throws Exception {
    // start just an HDFS cluster (a single DataNode) with util1
    HBaseTestingUtility util1 = new HBaseTestingUtility();
    MiniDFSCluster dfs = util1.startMiniDFSCluster(1);

    // run HBase on top of that HDFS
    HBaseTestingUtility util2 = new HBaseTestingUtility();
    // hand util2 the already-running DFS cluster instead of letting it start its own
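    // (passing false skips the utility's check that any previously-set DFS cluster is
    // already shut down before this one is adopted)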
    util2.setDFSCluster(dfs, false);
    util2.startMiniCluster();

    // ensure that both utilities point at the same HDFS instance
    FileSystem fs = dfs.getFileSystem();
    FileSystem targetFs = util2.getDFSCluster().getFileSystem();
    assertFsSameUri(fs, targetFs);

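    // the same should hold for the filesystems resolved from each utility's Configuration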
    fs = FileSystem.get(util1.getConfiguration());
    targetFs = FileSystem.get(util2.getConfiguration());
    assertFsSameUri(fs, targetFs);

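    // a file created through one handle must be visible through the other,
    // confirming both handles share the same namespace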
    Path randomFile = new Path("/" + UUID.randomUUID());
    assertTrue(targetFs.createNewFile(randomFile));
    assertTrue(fs.exists(randomFile));

    // do a simple create/write to ensure the cluster works as expected
    byte[] family = Bytes.toBytes("testfamily");
    final TableName tablename = TableName.valueOf(name.getMethodName());
    try (Table table = util2.createTable(tablename, family)) {
      Put p = new Put(new byte[] { 1, 2, 3 });
      p.addColumn(family, null, new byte[] { 1 });
      table.put(p);
    }

    // shut down and make sure everything stops cleanly
    util2.shutdownMiniCluster();
    util1.shutdownMiniDFSCluster();
  }

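  /**
   * Assert that both filesystems are rooted at the same URI, i.e. backed by the same DFS instance.
   */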
  private void assertFsSameUri(FileSystem sourceFs, FileSystem targetFs) {
    Path source = new Path(sourceFs.getUri());
    Path target = new Path(targetFs.getUri());
    assertEquals(source, target);
  }
}