/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.util;

import static org.junit.Assert.assertTrue;

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseClassTestRule;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.testclassification.MediumTests;
import org.apache.hadoop.hbase.testclassification.MiscTests;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.junit.Before;
import org.junit.ClassRule;
import org.junit.Test;
import org.junit.experimental.categories.Category;
import org.mockito.Mockito;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

/**
 * Test our recoverLease loop against a mocked-up filesystem.
 */
@Category({MiscTests.class, MediumTests.class})
public class TestFSHDFSUtils {

  @ClassRule
  public static final HBaseClassTestRule CLASS_RULE =
      HBaseClassTestRule.forClass(TestFSHDFSUtils.class);

  private static final Logger LOG = LoggerFactory.getLogger(TestFSHDFSUtils.class);
  private static final HBaseTestingUtility HTU = new HBaseTestingUtility();
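  // Shorten the lease recovery pauses so the retry loops exercised below finish quickly.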
  static {
    Configuration conf = HTU.getConfiguration();
    conf.setInt("hbase.lease.recovery.first.pause", 10);
    conf.setInt("hbase.lease.recovery.pause", 10);
  }
  private FSHDFSUtils fsHDFSUtils = new FSHDFSUtils();
  private static final Path FILE = new Path(HTU.getDataTestDir(), "file.txt");
  long startTime = -1;

  @Before
  public void setup() {
    this.startTime = EnvironmentEdgeManager.currentTime();
  }

  /**
   * Test that lease recovery eventually succeeds.
   */
  @Test
  public void testRecoverLease() throws IOException {
    HTU.getConfiguration().setInt("hbase.lease.recovery.dfs.timeout", 1000);
    CancelableProgressable reporter = Mockito.mock(CancelableProgressable.class);
    Mockito.when(reporter.progress()).thenReturn(true);
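    // A reporter that keeps reporting progress lets the recovery loop keep retrying
    // instead of giving up.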
    DistributedFileSystem dfs = Mockito.mock(DistributedFileSystem.class);
    // Fail four times and pass on the fifth.
    Mockito.when(dfs.recoverLease(FILE)).
      thenReturn(false).thenReturn(false).thenReturn(false).thenReturn(false).thenReturn(true);
    assertTrue(this.fsHDFSUtils.recoverDFSFileLease(dfs, FILE, HTU.getConfiguration(), reporter));
    Mockito.verify(dfs, Mockito.times(5)).recoverLease(FILE);
    // Make sure we waited at least hbase.lease.recovery.dfs.timeout * 3 (the first two
    // invocations happen pretty fast; then we fall into the longer wait loop).
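    // With the 1000ms timeout configured above, elapsed time should therefore exceed 3000ms.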
    assertTrue((EnvironmentEdgeManager.currentTime() - this.startTime) >
      (3 * HTU.getConfiguration().getInt("hbase.lease.recovery.dfs.timeout", 61000)));
  }

  /**
   * Test that isFileClosed makes us recover the lease faster.
   */
  @Test
  public void testIsFileClosed() throws IOException {
    // Make this time long so it is plain we broke out because of the isFileClosed invocation.
    HTU.getConfiguration().setInt("hbase.lease.recovery.dfs.timeout", 100000);
    CancelableProgressable reporter = Mockito.mock(CancelableProgressable.class);
    Mockito.when(reporter.progress()).thenReturn(true);
    IsFileClosedDistributedFileSystem dfs = Mockito.mock(IsFileClosedDistributedFileSystem.class);
    // Fail the first two times -- the two fast invocations -- so we fall into the long
    // loop, during which we call isFileClosed; it must return true for us to break out
    // of the loop early.
    Mockito.when(dfs.recoverLease(FILE)).
      thenReturn(false).thenReturn(false).thenReturn(true);
    Mockito.when(dfs.isFileClosed(FILE)).thenReturn(true);
    assertTrue(this.fsHDFSUtils.recoverDFSFileLease(dfs, FILE, HTU.getConfiguration(), reporter));
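    // Two recoverLease attempts and a single isFileClosed probe show the closed-file
    // fast path short-circuited the 100-second wait.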
    Mockito.verify(dfs, Mockito.times(2)).recoverLease(FILE);
    Mockito.verify(dfs, Mockito.times(1)).isFileClosed(FILE);
  }

  void testIsSameHdfs(int nnport) throws IOException {
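    // Probe for DFSUtil#getNNServiceRpcAddresses via reflection; the method is only
    // present on HA-capable Hadoop versions.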
    try {
      Class<?> dfsUtilClazz = Class.forName("org.apache.hadoop.hdfs.DFSUtil");
      dfsUtilClazz.getMethod("getNNServiceRpcAddresses", Configuration.class);
    } catch (Exception e) {
      LOG.info("Skipping testIsSameHdfs because this Hadoop version does not support HA.");
      return;
    }

    Configuration conf = HBaseConfiguration.create();
    Path srcPath = new Path("hdfs://localhost:" + nnport + "/");
    Path desPath = new Path("hdfs://127.0.0.1/");
    FileSystem srcFs = srcPath.getFileSystem(conf);
    FileSystem desFs = desPath.getFileSystem(conf);

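    // localhost and 127.0.0.1 resolve to the same host, and nnport is the version's
    // default port, so both paths point at the same HDFS.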
    assertTrue(FSHDFSUtils.isSameHdfs(conf, srcFs, desFs));

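    // A different port or a different host must not be reported as the same HDFS.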
    desPath = new Path("hdfs://127.0.0.1:8070/");
    desFs = desPath.getFileSystem(conf);
    assertTrue(!FSHDFSUtils.isSameHdfs(conf, srcFs, desFs));

    desPath = new Path("hdfs://127.0.1.1:" + nnport + "/");
    desFs = desPath.getFileSystem(conf);
    assertTrue(!FSHDFSUtils.isSameHdfs(conf, srcFs, desFs));

    conf.set("fs.defaultFS", "hdfs://haosong-hadoop");
    conf.set("dfs.nameservices", "haosong-hadoop");
    conf.set("dfs.ha.namenodes.haosong-hadoop", "nn1,nn2");
    conf.set("dfs.client.failover.proxy.provider.haosong-hadoop",
        "org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider");

    conf.set("dfs.namenode.rpc-address.haosong-hadoop.nn1", "127.0.0.1:" + nnport);
    conf.set("dfs.namenode.rpc-address.haosong-hadoop.nn2", "127.10.2.1:8000");
    desPath = new Path("/");
    desFs = desPath.getFileSystem(conf);
    assertTrue(FSHDFSUtils.isSameHdfs(conf, srcFs, desFs));

    conf.set("dfs.namenode.rpc-address.haosong-hadoop.nn1", "127.10.2.1:" + nnport);
    conf.set("dfs.namenode.rpc-address.haosong-hadoop.nn2", "127.0.0.1:8000");
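    // Now neither namenode address matches srcFs exactly (host or port differs), so this
    // counts as a different cluster.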
    desPath = new Path("/");
    desFs = desPath.getFileSystem(conf);
    assertTrue(!FSHDFSUtils.isSameHdfs(conf, srcFs, desFs));
  }

  @Test
  public void testIsSameHdfs() throws IOException {
    String hadoopVersion = org.apache.hadoop.util.VersionInfo.getVersion();
    LOG.info("hadoop version is: {}", hadoopVersion);
    boolean isHadoop3_0_0 = hadoopVersion.startsWith("3.0.0");
    if (isHadoop3_0_0) {
      // Hadoop 3.0.0-alpha1 through 3.0.0 GA changed the default NN port to 9820.
      // See HDFS-9427.
      testIsSameHdfs(9820);
    } else {
      // Hadoop releases before 3.0.0 default to port 8020, and Hadoop 3.0.1 changed it
      // back to 8020. See HDFS-12990.
      testIsSameHdfs(8020);
    }
  }

  /**
   * Version of DFS that has HDFS-4525 in it.
   */
  static class IsFileClosedDistributedFileSystem extends DistributedFileSystem {
    /**
     * Close status of a file. Copied over from HDFS-4525.
     * @return true if file is already closed
     */
    @Override
    public boolean isFileClosed(Path f) throws IOException {
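      // The return value is irrelevant: the tests stub this method with Mockito and only
      // need it to exist on the class.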
      return false;
    }
  }
}