/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.util.hbck;

import static org.junit.Assert.assertEquals;

import java.io.IOException;
import java.util.ArrayList;
import java.util.List;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HRegionLocation;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.MetaTableAccessor;
import org.apache.hadoop.hbase.NamespaceDescriptor;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Delete;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.RegionInfo;
import org.apache.hadoop.hbase.client.RegionInfoBuilder;
import org.apache.hadoop.hbase.client.RegionLocator;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.regionserver.HRegionFileSystem;
import org.apache.hadoop.hbase.testclassification.LargeTests;
import org.apache.hadoop.hbase.testclassification.MiscTests;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.FSUtils;
import org.junit.After;
import org.junit.Before;
import org.junit.experimental.categories.Category;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

/**
 * This testing base class creates a minicluster and a test table,
 * and shuts down the cluster afterwards. It also provides methods to wipe out
 * meta and to inject errors into meta and the file system.
 *
 * Tests should generally break stuff, then attempt to rebuild the meta table
 * offline, then restart HBase, and finally perform checks.
 *
 * NOTE: This is a slow set of tests; each takes ~30s and needs to run on a
 * relatively beefy machine. It seems necessary to have each test in a new JVM,
 * since minicluster startups and teardowns seem to leak file handles and
 * eventually cause out-of-file-handle exceptions.
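 *
 * A minimal sketch of a hypothetical subclass, for orientation only (the test
 * name is illustrative, the rebuild step is elided, and the expected meta row
 * count assumes the setup performed by {@link #setUpBefore()}):
 *
 * <pre>
 * public class TestMyMetaRebuild extends OfflineMetaRebuildTestCore {
 *   &#64;Test
 *   public void testRebuildAfterWipe() throws Exception {
 *     wipeOutMeta();               // break: drop all user-region rows from meta
 *     // ... rebuild meta offline and restart the cluster here ...
 *     assertEquals(5, scanMeta()); // check: region rows are back
 *   }
 * }
 * </pre>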
 */
@Category({MiscTests.class, LargeTests.class})
public class OfflineMetaRebuildTestCore {
  private final static Logger LOG = LoggerFactory
      .getLogger(OfflineMetaRebuildTestCore.class);
  protected HBaseTestingUtility TEST_UTIL;
  protected Configuration conf;
  private final static byte[] FAM = Bytes.toBytes("fam");

  // for the instance, reset every test run
  protected Table htbl;
  protected final static byte[][] splits = new byte[][] { Bytes.toBytes("A"),
      Bytes.toBytes("B"), Bytes.toBytes("C") };

  private final static String TABLE_BASE = "tableMetaRebuild";
  private static int tableIdx = 0;
  protected TableName table = TableName.valueOf(TABLE_BASE);
  protected Connection connection;

  @Before
  public void setUpBefore() throws Exception {
    TEST_UTIL = new HBaseTestingUtility();
    TEST_UTIL.getConfiguration().setInt("dfs.datanode.max.xceivers", 9192);
    TEST_UTIL.startMiniCluster(3);
    conf = TEST_UTIL.getConfiguration();
    this.connection = ConnectionFactory.createConnection(conf);
    assertEquals(0, TEST_UTIL.getAdmin().listTables().length);

    // setup the table
    table = TableName.valueOf(TABLE_BASE + "-" + tableIdx);
    tableIdx++;
    htbl = setupTable(table);
    populateTable(htbl);
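    // 3 split keys yield 4 table regions; with the hbase:namespace region that
    // makes 5 rows in meta.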
    assertEquals(5, scanMeta());
    LOG.info("Table " + table + " has " + tableRowCount(conf, table)
        + " entries.");
    assertEquals(16, tableRowCount(conf, table));
    TEST_UTIL.getAdmin().disableTable(table);
    assertEquals(1, TEST_UTIL.getAdmin().listTables().length);
  }

  @After
  public void tearDownAfter() throws Exception {
    if (this.htbl != null) {
      this.htbl.close();
      this.htbl = null;
    }
    this.connection.close();
    TEST_UTIL.shutdownMiniCluster();
  }

  /**
   * Sets up a clean table before we start mucking with it.
   */
  private Table setupTable(TableName tablename) throws Exception {
    HTableDescriptor desc = new HTableDescriptor(tablename);
    HColumnDescriptor hcd = new HColumnDescriptor(Bytes.toString(FAM));
    desc.addFamily(hcd); // If a table has no CF's it doesn't get checked
    TEST_UTIL.getAdmin().createTable(desc, splits);
    return this.connection.getTable(tablename);
  }

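  /**
   * Logs every hbase:meta row that belongs to the given table.
   */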
  private void dumpMeta(HTableDescriptor htd) throws IOException {
    List<byte[]> metaRows = TEST_UTIL.getMetaTableRows(htd.getTableName());
    for (byte[] row : metaRows) {
      LOG.info(Bytes.toString(row));
    }
  }

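  /**
   * Loads 16 rows with two-byte keys ('AA' through 'DD') into the table.
   */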
  private void populateTable(Table tbl) throws IOException {
    byte[] values = { 'A', 'B', 'C', 'D' };
    List<Put> puts = new ArrayList<>();
    for (int i = 0; i < values.length; i++) {
      for (int j = 0; j < values.length; j++) {
        Put put = new Put(new byte[] { values[i], values[j] });
        put.addColumn(FAM, new byte[] {}, new byte[] { values[i], values[j] });
        puts.add(put);
      }
    }
    tbl.put(puts);
  }

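  /**
   * Deletes the region spanning [startKey, endKey): unassigns it, removes its
   * directory from HDFS, and deletes its row from hbase:meta.
   */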
  protected void deleteRegion(Configuration conf, final Table tbl,
      byte[] startKey, byte[] endKey) throws IOException {

    LOG.info("Before delete:");
    HTableDescriptor htd = tbl.getTableDescriptor();
    dumpMeta(htd);

    List<HRegionLocation> regions;
    try (RegionLocator rl = connection.getRegionLocator(tbl.getName())) {
      regions = rl.getAllRegionLocations();
    }

    for (HRegionLocation e : regions) {
      RegionInfo hri = e.getRegionInfo();
      ServerName hsa = e.getServerName();
      if (Bytes.compareTo(hri.getStartKey(), startKey) == 0
          && Bytes.compareTo(hri.getEndKey(), endKey) == 0) {

        LOG.info("RegionName: " + hri.getRegionNameAsString());
        byte[] deleteRow = hri.getRegionName();
        TEST_UTIL.getAdmin().unassign(deleteRow, true);

        LOG.info("deleting hdfs data: " + hri.toString() + hsa.toString());
        Path rootDir = FSUtils.getRootDir(conf);
        FileSystem fs = rootDir.getFileSystem(conf);
        Path p = new Path(FSUtils.getTableDir(rootDir, htd.getTableName()),
            hri.getEncodedName());
        fs.delete(p, true);

        try (Table meta = this.connection.getTable(TableName.META_TABLE_NAME)) {
          Delete delete = new Delete(deleteRow);
          meta.delete(delete);
        }
      }
      LOG.info(hri.toString() + hsa.toString());
    }

    LOG.info("After delete:");
    dumpMeta(htd);
  }

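  /**
   * Manually creates a region spanning [startKey, endKey): writes its
   * .regioninfo file to HDFS and adds a matching row to hbase:meta.
   */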
  protected RegionInfo createRegion(Configuration conf, final Table htbl,
      byte[] startKey, byte[] endKey) throws IOException {
    RegionInfo hri = RegionInfoBuilder.newBuilder(htbl.getName())
        .setStartKey(startKey)
        .setEndKey(endKey)
        .build();

    LOG.info("manually adding regioninfo and hdfs data: " + hri.toString());
    Path rootDir = FSUtils.getRootDir(conf);
    FileSystem fs = rootDir.getFileSystem(conf);
    Path p = new Path(FSUtils.getTableDir(rootDir, htbl.getName()),
        hri.getEncodedName());
    fs.mkdirs(p);
    Path riPath = new Path(p, HRegionFileSystem.REGION_INFO_FILE);
    try (FSDataOutputStream out = fs.create(riPath)) {
      out.write(RegionInfo.toDelimitedByteArray(hri));
    }

    // add to meta.
    MetaTableAccessor.addRegionToMeta(TEST_UTIL.getConnection(), hri);
    return hri;
  }

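  /**
   * Deletes every non-system region row from hbase:meta, unassigning each
   * affected region along the way.
   */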
  protected void wipeOutMeta() throws IOException {
    // Mess it up by blowing up meta.
    Admin admin = TEST_UTIL.getAdmin();
    Scan s = new Scan();
    Table meta = TEST_UTIL.getConnection().getTable(TableName.META_TABLE_NAME);
    ResultScanner scanner = meta.getScanner(s);
    List<Delete> dels = new ArrayList<>();
    for (Result r : scanner) {
      RegionInfo info = MetaTableAccessor.getRegionInfo(r);
      if (info != null && !info.getTable().getNamespaceAsString()
          .equals(NamespaceDescriptor.SYSTEM_NAMESPACE_NAME_STR)) {
        Delete d = new Delete(r.getRow());
        dels.add(d);
        admin.unassign(r.getRow(), true);
      }
    }
    meta.delete(dels);
    scanner.close();
    meta.close();
  }

  /**
   * Returns the number of rows in a given table. HBase must be up and the table
   * must be present; otherwise the underlying scan retries until it times out.
   *
   * @return number of rows in the specified table
   */
  protected int tableRowCount(Configuration conf, TableName table)
      throws IOException {
    Table t = TEST_UTIL.getConnection().getTable(table);
    Scan st = new Scan();

    ResultScanner rst = t.getScanner(st);
    int count = 0;
    for (@SuppressWarnings("unused") Result rt : rst) {
      count++;
    }
    t.close();
    return count;
  }

  /**
   * Dumps the contents of hbase:meta to the log.
   *
   * @return number of region entries in meta.
   */
  protected int scanMeta() throws IOException {
    LOG.info("Scanning META");
    MetaTableAccessor.fullScanMetaAndPrint(TEST_UTIL.getConnection());
    return MetaTableAccessor.fullScanRegions(TEST_UTIL.getConnection()).size();
  }

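  /**
   * Lists the user table descriptors visible through a freshly created
   * connection.
   */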
  protected HTableDescriptor[] getTables(final Configuration configuration) throws IOException {
    HTableDescriptor[] htbls = null;
    try (Connection connection = ConnectionFactory.createConnection(configuration);
        Admin admin = connection.getAdmin()) {
      htbls = admin.listTables();
    }
    return htbls;
  }
}